Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)

16  README.md
@@ -1,6 +1,6 @@
-## Security Onion 2.1.0.rc2
+## Security Onion 2.2.0.rc3
 
-Security Onion 2.1.0 RC2 is here!
+Security Onion 2.2.0 RC3 is here!
 
 ### Warnings and Disclaimers
 
@@ -14,24 +14,24 @@ Security Onion 2.1.0 RC2 is here!
 
 ### Release Notes
 
-https://docs.securityonion.net/en/2.1/release-notes.html
+https://docs.securityonion.net/en/2.2/release-notes.html
 
 ### Requirements
 
-https://docs.securityonion.net/en/2.1/hardware.html
+https://docs.securityonion.net/en/2.2/hardware.html
 
 ### Download
 
-https://docs.securityonion.net/en/2.1/download.html
+https://docs.securityonion.net/en/2.2/download.html
 
 ### Installation
 
-https://docs.securityonion.net/en/2.1/installation.html
+https://docs.securityonion.net/en/2.2/installation.html
 
 ### FAQ
 
-https://docs.securityonion.net/en/2.1/faq.html
+https://docs.securityonion.net/en/2.2/faq.html
 
 ### Feedback
 
-https://docs.securityonion.net/en/2.1/community-support.html
+https://docs.securityonion.net/en/2.2/community-support.html
@@ -1,16 +1,16 @@
-### 2.1.0-rc2 ISO image built on 2020/08/23
+### 2.2.0-rc3 ISO image built on 2020/09/17
 
 ### Download and Verify
 
-2.1.0-rc2 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso
+2.2.0-rc3 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.2.0-rc3.iso
 
-MD5: 9EAE772B64F5B3934C0DB7913E38D6D4
-SHA1: D0D347AE30564871DE81203C0CE53B950F8732CE
-SHA256: 888AC7758C975FAA0A7267E5EFCB082164AC7AC8DCB3B370C06BA0B8493DAC44
+MD5: 051883501C905653ACBCEC513C294778
+SHA1: 0A66F6636F53B268E7FFB743A3136AC5CC3E0E96
+SHA256: 5A9F303954AF1B1D271CE526E5DCBFC28F3FFC0621B291A29F0F7F2E8EB11C43
 
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.1.0-rc2.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.2.0-rc3.iso.sig
 
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -24,22 +24,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
 
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.1.0-rc2.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.2.0-rc3.iso.sig
 ```
 
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.1.0-rc2.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.2.0-rc3.iso
 ```
 
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.1.0-rc2.iso.sig securityonion-2.1.0-rc2.iso
+gpg --verify securityonion-2.2.0-rc3.iso.sig securityonion-2.2.0-rc3.iso
 ```
 
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Sun 23 Aug 2020 04:37:00 PM EDT using RSA key ID FE507013
+gpg: Signature made Thu 17 Sep 2020 10:05:27 AM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
@@ -47,4 +47,4 @@ Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
 ```
 
 Once you've verified the ISO image, you're ready to proceed to our Installation guide:
-https://docs.securityonion.net/en/2.1/installation.html
+https://docs.securityonion.net/en/2.2/installation.html
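For a quick local check before the GPG verification, the published hashes can be compared directly; the filename below assumes the 2.2.0-rc3 ISO sits in the current directory (illustrative, not part of this changeset):
```
sha256sum securityonion-2.2.0-rc3.iso
# compare the printed digest against the SHA256 value listed above
```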
79  files/analyst/README  Normal file

@@ -0,0 +1,79 @@
The following GUI tools are available on the analyst workstation:

chromium
url: https://www.chromium.org/Home
To run chromium, click Applications > Internet > Chromium Web Browser

Wireshark
url: https://www.wireshark.org/
To run Wireshark, click Applications > Internet > Wireshark Network Analyzer

NetworkMiner
url: https://www.netresec.com
To run NetworkMiner, click Applications > Internet > NetworkMiner

The following CLI tools are available on the analyst workstation:

bit-twist
url: http://bittwist.sourceforge.net
To run bit-twist, open a terminal and type: bittwist -h

chaosreader
url: http://chaosreader.sourceforge.net
To run chaosreader, open a terminal and type: chaosreader -h

dsniff
url: https://www.monkey.org/~dugsong/dsniff/
To run dsniff, open a terminal and type: dsniff -h

foremost
url: http://foremost.sourceforge.net
To run foremost, open a terminal and type: foremost -h

hping3
url: http://www.hping.org/hping3.html
To run hping3, open a terminal and type: hping3 -h

netsed
url: http://silicone.homelinux.org/projects/netsed/
To run netsed, open a terminal and type: netsed -h

ngrep
url: https://github.com/jpr5/ngrep
To run ngrep, open a terminal and type: ngrep -h

scapy
url: http://www.secdev.org/projects/scapy/
To run scapy, open a terminal and type: scapy

ssldump
url: http://www.rtfm.com/ssldump/
To run ssldump, open a terminal and type: ssldump -h

sslsplit
url: https://github.com/droe/sslsplit
To run sslsplit, open a terminal and type: sslsplit -h

tcpdump
url: http://www.tcpdump.org
To run tcpdump, open a terminal and type: tcpdump -h

tcpflow
url: https://github.com/simsong/tcpflow
To run tcpflow, open a terminal and type: tcpflow -h

tcpstat
url: https://frenchfries.net/paul/tcpstat/
To run tcpstat, open a terminal and type: tcpstat -h

tcptrace
url: http://www.tcptrace.org
To run tcptrace, open a terminal and type: tcptrace -h

tcpxtract
url: http://tcpxtract.sourceforge.net/
To run tcpxtract, open a terminal and type: tcpxtract -h

whois
url: http://www.linux.it/~md/software/
To run whois, open a terminal and type: whois -h
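As a rough sketch of how a few of the CLI tools above are commonly chained (the capture file name is hypothetical and not part of this changeset):
```
tcpdump -nn -r sample.pcap 'tcp port 80' -w http-only.pcap   # filter an existing capture
tcpflow -r http-only.pcap -o flows/                          # reassemble TCP streams into files
foremost -i http-only.pcap -o carved/                        # carve recognizable file types
```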
@@ -5,7 +5,7 @@
 {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
 {% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
 {% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
-{% set ZEEKVER = salt['pillar.get']('global:zeekversion', 'COMMUNITY') %}
+{% set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %}
 {% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
 
 eval:

@@ -26,6 +26,7 @@ firewall:
- 4200
- 5601
- 6379
- 7788
- 8086
- 8090
- 9001
@@ -2,6 +2,8 @@
 import logging
 import sys
+from time import time
+from os.path import getsize
 
 allowed_functions = ['is_enabled', 'zeek']
 states_to_apply = []
@@ -85,8 +87,21 @@ def zeek():
     else:
         zeek_restart = 0
 
-    __salt__['telegraf.send']('healthcheck zeek_restart=%i' % zeek_restart)
+    #__salt__['telegraf.send']('healthcheck zeek_restart=%i' % zeek_restart)
+    # write out to file in /nsm/zeek/logs/ for telegraf to read for zeek restart
+    try:
+        if getsize("/nsm/zeek/logs/zeek_restart.log") >= 1000000:
+            openmethod = "w"
+        else:
+            openmethod = "a"
+    except FileNotFoundError:
+        openmethod = "a"
+
+    influxtime = int(time() * 1000000000)
+    with open("/nsm/zeek/logs/zeek_restart.log", openmethod) as f:
+        f.write('healthcheck zeek_restart=%i %i\n' % (zeek_restart, influxtime))
 
 
     if calling_func == 'execute' and zeek_restart:
         apply_states()
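For reference, each write above appends one InfluxDB line-protocol record that telegraf can tail; a quick way to eyeball it (the tail command is illustrative only):
```
tail -n 5 /nsm/zeek/logs/zeek_restart.log
# each line looks like: healthcheck zeek_restart=0 1600340727000000000
# i.e. measurement and field followed by a nanosecond timestamp, as produced by f.write() above
```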
4  salt/_modules/so.py  Normal file

@@ -0,0 +1,4 @@
#!py

def status():
    return __salt__['cmd.run']('/sbin/so-status')
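A short usage sketch for the new so.py execution module; the sync step and local targeting are assumptions for illustration, not part of this changeset:
```
sudo salt-call saltutil.sync_modules      # make the custom module available on the minion
sudo salt-call so.status                  # wraps /sbin/so-status, as defined above
```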
12  salt/airgap/files/yum.conf  Normal file

@@ -0,0 +1,12 @@
[main]
cachedir=/var/cache/yum/$basearch/$releasever
keepcache=0
debuglevel=2
logfile=/var/log/yum.log
exactarch=1
obsoletes=1
gpgcheck=1
plugins=1
installonly_limit=2
bugtracker_url=http://bugs.centos.org/set_project.php?project_id=23&ref=http://bugs.centos.org/bug_report_page.php?category=yum
distroverpkg=centos-release
60  salt/airgap/init.sls  Normal file

@@ -0,0 +1,60 @@
{% set MANAGER = salt['grains.get']('master') %}

airgapyum:
  file.managed:
    - name: /etc/yum/yum.conf
    - source: salt://airgap/files/yum.conf

airgap_repo:
  pkgrepo.managed:
    - humanname: Airgap Repo
    - baseurl: https://{{ MANAGER }}/repo
    - gpgcheck: 0
    - sslverify: 0

agbase:
  file.absent:
    - name: /etc/yum.repos.d/CentOS-Base.repo

agcr:
  file.absent:
    - name: /etc/yum.repos.d/CentOS-CR.repo

agdebug:
  file.absent:
    - name: /etc/yum.repos.d/CentOS-Debuginfo.repo

agfasttrack:
  file.absent:
    - name: /etc/yum.repos.d/CentOS-fasttrack.repo

agmedia:
  file.absent:
    - name: /etc/yum.repos.d/CentOS-Media.repo

agsources:
  file.absent:
    - name: /etc/yum.repos.d/CentOS-Sources.repo

agvault:
  file.absent:
    - name: /etc/yum.repos.d/CentOS-Vault.repo

agkernel:
  file.absent:
    - name: /etc/yum.repos.d/CentOS-x86_64-kernel.repo

agepel:
  file.absent:
    - name: /etc/yum.repos.d/epel.repo

agtesting:
  file.absent:
    - name: /etc/yum.repos.d/epel-testing.repo

agssrepo:
  file.absent:
    - name: /etc/yum.repos.d/saltstack.repo

agwazrepo:
  file.absent:
    - name: /etc/yum.repos.d/wazuh.repo
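A minimal sketch of exercising the new airgap state on a node (the follow-up checks are illustrative assumptions):
```
sudo salt-call state.apply airgap     # writes yum.conf, adds the manager-hosted repo, removes stock repos
ls /etc/yum.repos.d/                  # stock CentOS/EPEL/SaltStack/Wazuh repo files should be gone
yum repolist                          # only the "Airgap Repo" served from https://<manager>/repo should remain
```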
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'ca' in top_states %}
|
||||
|
||||
{% set manager = salt['grains.get']('master') %}
|
||||
/etc/salt/minion.d/signing_policies.conf:
|
||||
file.managed:
|
||||
@@ -51,4 +56,12 @@ cakeyperms:
|
||||
- replace: False
|
||||
- name: /etc/pki/ca.key
|
||||
- mode: 640
|
||||
- group: 939
|
||||
- group: 939
|
||||
|
||||
{% else %}
|
||||
|
||||
ca_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: ca_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
2  salt/common/cron/sensor-rotate  Normal file
@@ -0,0 +1,2 @@
|
||||
#!/bin/bash
|
||||
/usr/sbin/logrotate -f /opt/so/conf/sensor-rotate.conf > /dev/null 2>&1
|
||||
10  salt/common/files/sensor-rotate.conf  Normal file
@@ -0,0 +1,10 @@
|
||||
/opt/so/log/sensor_clean.log
|
||||
{
|
||||
daily
|
||||
rotate 2
|
||||
missingok
|
||||
nocompress
|
||||
create
|
||||
sharedscripts
|
||||
endscript
|
||||
}
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'common' in top_states %}
|
||||
|
||||
{% set role = grains.id.split('_') | last %}
|
||||
|
||||
# Remove variables.txt from /tmp - This is temp
|
||||
@@ -88,7 +93,7 @@ heldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
- containerd.io: 1.2.13-2
|
||||
- docker-ce: 5:19.03.9~3-0~ubuntu-bionic
|
||||
- docker-ce: 5:19.03.12~3-0~ubuntu-bionic
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
|
||||
@@ -124,7 +129,7 @@ heldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
- containerd.io: 1.2.13-3.2.el7
|
||||
- docker-ce: 3:19.03.11-3.el7
|
||||
- docker-ce: 3:19.03.12-3.el7
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
{% endif %}
|
||||
@@ -163,4 +168,39 @@ utilsyncscripts:
|
||||
- daymonth: '*'
|
||||
- month: '*'
|
||||
- dayweek: '*'
|
||||
|
||||
sensorrotatescript:
|
||||
file.managed:
|
||||
- name: /usr/local/bin/sensor-rotate
|
||||
- source: salt://common/cron/sensor-rotate
|
||||
- mode: 755
|
||||
|
||||
sensorrotateconf:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/sensor-rotate.conf
|
||||
- source: salt://common/files/sensor-rotate.conf
|
||||
- mode: 644
|
||||
|
||||
/usr/local/bin/sensor-rotate:
|
||||
cron.present:
|
||||
- user: root
|
||||
- minute: '1'
|
||||
- hour: '0'
|
||||
- daymonth: '*'
|
||||
- month: '*'
|
||||
- dayweek: '*'
|
||||
|
||||
{% endif %}
|
||||
|
||||
# Make sure Docker is always running
|
||||
docker:
|
||||
service.running:
|
||||
- enable: True
|
||||
|
||||
{% else %}
|
||||
|
||||
common_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: common_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -2,4 +2,4 @@
|
||||
'containers': [
|
||||
'so-zeek'
|
||||
]
|
||||
} %}
|
||||
} %}
|
||||
@@ -5,6 +5,9 @@
|
||||
# to the list predefined by the role / minion id affix
|
||||
{% macro append_containers(pillar_name, k, compare )%}
|
||||
{% if salt['pillar.get'](pillar_name~':'~k, {}) != compare %}
|
||||
{% if k == 'enabled' %}
|
||||
{% set k = pillar_name %}
|
||||
{% endif %}
|
||||
{% from 'common/maps/'~k~'.map.jinja' import docker as d with context %}
|
||||
{% for li in d['containers'] %}
|
||||
{{ docker['containers'].append(li) }}
|
||||
@@ -21,7 +24,7 @@
|
||||
{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
|
||||
{{ append_containers('manager', 'grafana', 0) }}
|
||||
{{ append_containers('global', 'fleet_manager', 0) }}
|
||||
{{ append_containers('manager', 'wazuh', 0) }}
|
||||
{{ append_containers('global', 'wazuh', 0) }}
|
||||
{{ append_containers('manager', 'thehive', 0) }}
|
||||
{{ append_containers('manager', 'playbook', 0) }}
|
||||
{{ append_containers('manager', 'freq', 0) }}
|
||||
@@ -29,11 +32,11 @@
|
||||
{% endif %}
|
||||
|
||||
{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
|
||||
{{ append_containers('global', 'strelka', 0) }}
|
||||
{{ append_containers('strelka', 'enabled', 0) }}
|
||||
{% endif %}
|
||||
|
||||
{% if role in ['heavynode', 'standalone'] %}
|
||||
{{ append_containers('global', 'zeekversion', 'SURICATA') }}
|
||||
{{ append_containers('global', 'mdengine', 'SURICATA') }}
|
||||
{% endif %}
|
||||
|
||||
{% if role == 'searchnode' %}
|
||||
@@ -41,5 +44,5 @@
|
||||
{% endif %}
|
||||
|
||||
{% if role == 'sensor' %}
|
||||
{{ append_containers('global', 'zeekversion', 'SURICATA') }}
|
||||
{{ append_containers('global', 'mdengine', 'SURICATA') }}
|
||||
{% endif %}
|
||||
@@ -19,14 +19,29 @@ IMAGEREPO=securityonion
|
||||
|
||||
# Check for prerequisites
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "This script must be run using sudo!"
|
||||
exit 1
|
||||
echo "This script must be run using sudo!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Define a banner to separate sections
|
||||
banner="========================================================================="
|
||||
|
||||
header() {
|
||||
echo
|
||||
printf '%s\n' "$banner" "$*" "$banner"
|
||||
echo
|
||||
printf '%s\n' "$banner" "$*" "$banner"
|
||||
}
|
||||
|
||||
lookup_pillar() {
|
||||
key=$1
|
||||
cat /opt/so/saltstack/local/pillar/global.sls | grep $key | awk '{print $2}'
|
||||
}
|
||||
|
||||
lookup_pillar_secret() {
|
||||
key=$1
|
||||
cat /opt/so/saltstack/local/pillar/secrets.sls | grep $key | awk '{print $2}'
|
||||
}
|
||||
|
||||
check_container() {
|
||||
docker ps | grep "$1:" > /dev/null 2>&1
|
||||
return $?
|
||||
}
|
||||
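A quick sketch of the helpers above; they simply grep the local pillar files, so keys are looked up by name (interactive use shown here is an assumption):
```
. /usr/sbin/so-common
lookup_pillar managerip          # value from /opt/so/saltstack/local/pillar/global.sls
lookup_pillar_secret mysql       # value from /opt/so/saltstack/local/pillar/secrets.sls
check_container so-zeek && echo "so-zeek is running"
```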
54  salt/common/tools/sbin/so-cortex-user-add  Executable file
@@ -0,0 +1,54 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 <new-user-name>"
|
||||
echo ""
|
||||
echo "Adds a new user to Cortex. The new password will be read from STDIN."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
USER=$1
|
||||
|
||||
CORTEX_KEY=$(lookup_pillar cortexkey)
|
||||
CORTEX_IP=$(lookup_pillar managerip)
|
||||
CORTEX_ORG_NAME=$(lookup_pillar cortexorgname)
|
||||
CORTEX_USER=$USER
|
||||
|
||||
# Read password for new user from stdin
|
||||
test -t 0
|
||||
if [[ $? == 0 ]]; then
|
||||
echo "Enter new password:"
|
||||
fi
|
||||
read -s CORTEX_PASS
|
||||
|
||||
# Create new user in Cortex
|
||||
resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
|
||||
if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
|
||||
echo "Successfully added user to Cortex."
|
||||
else
|
||||
echo "Unable to add user to Cortex; user might already exist."
|
||||
echo $resp
|
||||
exit 2
|
||||
fi
|
||||
|
||||
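A usage sketch for the new script; the account name and password below are hypothetical, and the password is piped on STDIN as the usage text describes:
```
echo 'S0me-Str0ng-Passw0rd' | sudo so-cortex-user-add analyst@example.com
```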
57  salt/common/tools/sbin/so-cortex-user-enable  Executable file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 <user-name> <true|false>"
|
||||
echo ""
|
||||
echo "Enables or disables a user in Cortex."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ $# -ne 2 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
USER=$1
|
||||
|
||||
CORTEX_KEY=$(lookup_pillar cortexkey)
|
||||
CORTEX_IP=$(lookup_pillar managerip)
|
||||
CORTEX_USER=$USER
|
||||
|
||||
case "${2^^}" in
|
||||
FALSE | NO | 0)
|
||||
CORTEX_STATUS=Locked
|
||||
;;
|
||||
TRUE | YES | 1)
|
||||
CORTEX_STATUS=Ok
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
|
||||
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
|
||||
if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
|
||||
echo "Successfully updated user in Cortex."
|
||||
else
|
||||
echo "Failed to update user in Cortex."
|
||||
echo $resp
|
||||
exit 2
|
||||
fi
|
||||
|
||||
@@ -1,4 +1,6 @@
|
||||
{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
|
||||
{%- set mainint = salt['pillar.get']('host:mainint') %}
|
||||
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
|
||||
|
||||
#!/bin/bash
|
||||
# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
|
||||
#
|
||||
@@ -16,7 +18,7 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
default_conf_dir=/opt/so/conf
|
||||
ELASTICSEARCH_HOST="{{ MANAGERIP}}"
|
||||
ELASTICSEARCH_HOST="{{ MYIP }}"
|
||||
ELASTICSEARCH_PORT=9200
|
||||
#ELASTICSEARCH_AUTH=""
|
||||
|
||||
|
||||
@@ -17,6 +17,28 @@
|
||||
. /usr/sbin/so-common
|
||||
local_salt_dir=/opt/so/saltstack/local
|
||||
|
||||
cat << EOF
|
||||
This program will switch from the open source version of the Elastic Stack to the Features version licensed under the Elastic license.
|
||||
If you proceed, then we will download new Docker images and restart services.
|
||||
|
||||
Please review the Elastic license:
|
||||
https://raw.githubusercontent.com/elastic/elasticsearch/master/licenses/ELASTIC-LICENSE.txt
|
||||
|
||||
Please also note that, if you have a distributed deployment and continue with this change, Elastic traffic between nodes will change from encrypted to cleartext!
|
||||
(We expect to support Elastic Features Security at some point in the future.)
|
||||
|
||||
Do you agree to the terms of the Elastic license and understand the note about encryption?
|
||||
|
||||
If so, type AGREE to accept the Elastic license and continue. Otherwise, just press Enter to exit this program without making any changes.
|
||||
EOF
|
||||
|
||||
read INPUT
|
||||
if [ "$INPUT" != "AGREE" ]; then
|
||||
exit
|
||||
fi
|
||||
|
||||
echo "Please wait while switching to Elastic Features."
|
||||
|
||||
manager_check() {
|
||||
# Check to see if this is a manager
|
||||
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
|
||||
|
||||
59  salt/common/tools/sbin/so-fleet-user-add  Executable file
@@ -0,0 +1,59 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 <new-user-name>"
|
||||
echo ""
|
||||
echo "Adds a new user to Fleet. The new password will be read from STDIN."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
USER=$1
|
||||
|
||||
MYSQL_PASS=$(lookup_pillar_secret mysql)
|
||||
FLEET_IP=$(lookup_pillar fleet_ip)
|
||||
FLEET_USER=$USER
|
||||
|
||||
# Read password for new user from stdin
|
||||
test -t 0
|
||||
if [[ $? == 0 ]]; then
|
||||
echo "Enter new password:"
|
||||
fi
|
||||
read -s FLEET_PASS
|
||||
|
||||
FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Failed to generate Fleet password hash."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
|
||||
"INSERT INTO users (password,salt,username,email,admin,enabled) VALUES ('$FLEET_HASH','','$FLEET_USER','$FLEET_USER',1,1)" 2>&1)
|
||||
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "Successfully added user to Fleet."
|
||||
else
|
||||
echo "Unable to add user to Fleet; user might already exist."
|
||||
echo "$MYSQL_OUTPUT"
|
||||
exit 2
|
||||
fi
|
||||
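A usage sketch for so-fleet-user-add; the account is hypothetical and the follow-up query is just one way to confirm the row was inserted:
```
echo 'S0me-Str0ng-Passw0rd' | sudo so-fleet-user-add analyst@example.com
. /usr/sbin/so-common
sudo docker exec so-mysql mysql -u root --password="$(lookup_pillar_secret mysql)" fleet \
  -e "SELECT username, enabled FROM users"
```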
58  salt/common/tools/sbin/so-fleet-user-enable  Executable file
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 <user-name>"
|
||||
echo ""
|
||||
echo "Enables or disables a user in Fleet."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ $# -ne 2 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
USER=$1
|
||||
|
||||
MYSQL_PASS=$(lookup_pillar_secret mysql)
|
||||
FLEET_IP=$(lookup_pillar fleet_ip)
|
||||
FLEET_USER=$USER
|
||||
|
||||
case "${2^^}" in
|
||||
FALSE | NO | 0)
|
||||
FLEET_STATUS=0
|
||||
;;
|
||||
TRUE | YES | 1)
|
||||
FLEET_STATUS=1
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
|
||||
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
|
||||
"UPDATE users SET enabled=$FLEET_STATUS WHERE username='$FLEET_USER'" 2>&1)
|
||||
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "Successfully updated user in Fleet."
|
||||
else
|
||||
echo "Failed to update user in Fleet."
|
||||
echo "$MYSQL_OUTPUT"
|
||||
exit 2
|
||||
fi
|
||||
@@ -19,18 +19,22 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
echo $banner
|
||||
printf "Restarting $1...\n\nThis could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
|
||||
echo $banner
|
||||
if [ $# -ge 1 ]; then
|
||||
|
||||
if [ "$2" = "--force" ]
|
||||
then
|
||||
printf "\nForce-stopping all Salt jobs before proceeding\n\n"
|
||||
salt-call saltutil.kill_all_jobs
|
||||
echo $banner
|
||||
printf "Restarting $1...\n\nThis could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
|
||||
echo $banner
|
||||
|
||||
if [ "$2" = "--force" ]; then
|
||||
printf "\nForce-stopping all Salt jobs before proceeding\n\n"
|
||||
salt-call saltutil.kill_all_jobs
|
||||
fi
|
||||
|
||||
case $1 in
|
||||
"cortex") docker stop so-thehive-cortex so-thehive && docker rm so-thehive-cortex so-thehive && salt-call state.apply hive queue=True;;
|
||||
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
|
||||
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
|
||||
esac
|
||||
else
|
||||
echo -e "\nPlease provide an argument by running like so-restart $component, or by using the component-specific script.\nEx. so-restart filebeat, or so-filebeat-restart\n"
|
||||
fi
|
||||
|
||||
case $1 in
|
||||
"cortex") docker stop so-thehive-cortex so-thehive && docker rm so-thehive-cortex so-thehive && salt-call state.apply hive queue=True;;
|
||||
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
|
||||
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
|
||||
esac
|
||||
|
||||
@@ -115,7 +115,5 @@ if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then
|
||||
clean
|
||||
CUR_USAGE=$(df -P $SENSOR_DIR | tail -1 | awk '{print $5}' | tr -d %)
|
||||
done
|
||||
else
|
||||
echo "$(date) - Current usage value of $CUR_USAGE not greater than CRIT_DISK_USAGE value of $CRIT_DISK_USAGE..." >> $LOG
|
||||
fi
|
||||
|
||||
|
||||
@@ -19,18 +19,21 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
echo $banner
|
||||
printf "Starting $1...\n\nThis could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
|
||||
echo $banner
|
||||
if [ $# -ge 1 ]; then
|
||||
echo $banner
|
||||
printf "Starting $1...\n\nThis could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
|
||||
echo $banner
|
||||
|
||||
if [ "$2" = "--force" ]
|
||||
then
|
||||
printf "\nForce-stopping all Salt jobs before proceeding\n\n"
|
||||
salt-call saltutil.kill_all_jobs
|
||||
if [ "$2" = "--force" ]; then
|
||||
printf "\nForce-stopping all Salt jobs before proceeding\n\n"
|
||||
salt-call saltutil.kill_all_jobs
|
||||
fi
|
||||
|
||||
case $1 in
|
||||
"all") salt-call state.highstate queue=True;;
|
||||
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
|
||||
*) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
|
||||
esac
|
||||
else
|
||||
echo -e "\nPlease provide an argument by running like so-start $component, or by using the component-specific script.\nEx. so-start filebeat, or so-filebeat-start\n"
|
||||
fi
|
||||
|
||||
case $1 in
|
||||
"all") salt-call state.highstate queue=True;;
|
||||
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
|
||||
*) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
|
||||
esac
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{%- from 'common/maps/so-status.map.jinja' import docker with context %}
|
||||
{%- set container_list = docker['containers'] | sort %}
|
||||
{%- set container_list = docker['containers'] | sort | unique %}
|
||||
|
||||
if ! [ "$(id -u)" = 0 ]; then
|
||||
echo "This command must be run as root"
|
||||
@@ -27,6 +27,7 @@ ERROR_STRING="ERROR"
|
||||
SUCCESS_STRING="OK"
|
||||
PENDING_STRING="PENDING"
|
||||
MISSING_STRING='MISSING'
|
||||
CALLER=$(ps -o comm= $PPID)
|
||||
declare -a BAD_STATUSES=("removing" "paused" "exited" "dead")
|
||||
declare -a PENDING_STATUSES=("paused" "created" "restarting")
|
||||
declare -a GOOD_STATUSES=("running")
|
||||
@@ -71,9 +72,9 @@ compare_lists() {
|
||||
# {% endraw %}
|
||||
|
||||
create_expected_container_list() {
|
||||
{% for item in container_list%}
|
||||
{% for item in container_list -%}
|
||||
expected_container_list+=("{{ item }}")
|
||||
{% endfor %}
|
||||
{% endfor -%}
|
||||
}
|
||||
|
||||
populate_container_lists() {
|
||||
@@ -93,7 +94,7 @@ populate_container_lists() {
|
||||
for line in "${docker_raw_list[@]}"; do
|
||||
container_name="$( echo $line | sed -e 's/Name:\(.*\),State:\(.*\)/\1/' )" # Get value in the first search group (container names)
|
||||
container_state="$( echo $line | sed -e 's/Name:\(.*\),State:\(.*\)/\2/' )" # Get value in the second search group (container states)
|
||||
|
||||
|
||||
temp_container_name_list+=( "${container_name}" )
|
||||
temp_container_state_list+=( "${container_state}" )
|
||||
done
|
||||
@@ -149,33 +150,78 @@ print_line() {
|
||||
printf "%s \n" " ]"
|
||||
}
|
||||
|
||||
main() {
|
||||
local focus_color="\e[1;34m"
|
||||
printf "\n"
|
||||
printf "${focus_color}%b\e[0m" "Checking Docker status\n\n"
|
||||
non_term_print_line() {
|
||||
local service_name=${1}
|
||||
local service_state="$( parse_status ${2} )"
|
||||
|
||||
systemctl is-active --quiet docker
|
||||
if [[ $? = 0 ]]; then
|
||||
print_line "Docker" "running"
|
||||
else
|
||||
print_line "Docker" "exited"
|
||||
fi
|
||||
local PADDING_CONSTANT=10
|
||||
|
||||
populate_container_lists
|
||||
|
||||
printf "\n"
|
||||
printf "${focus_color}%b\e[0m" "Checking container statuses\n\n"
|
||||
|
||||
local num_containers=${#container_name_list[@]}
|
||||
|
||||
for i in $(seq 0 $(($num_containers - 1 ))); do
|
||||
print_line ${container_name_list[$i]} ${container_state_list[$i]}
|
||||
printf " $service_name "
|
||||
for i in $(seq 0 $(( 40 - $PADDING_CONSTANT - ${#service_name} - ${#service_state} ))); do
|
||||
printf "-"
|
||||
done
|
||||
printf " [ "
|
||||
printf "$service_state"
|
||||
printf "%s \n" " ]"
|
||||
}
|
||||
|
||||
printf "\n"
|
||||
main() {
|
||||
|
||||
# if running from salt
|
||||
if [ "$CALLER" == 'salt-call' ] || [ "$CALLER" == 'salt-minion' ]; then
|
||||
printf "\n"
|
||||
printf "Checking Docker status\n\n"
|
||||
|
||||
systemctl is-active --quiet docker
|
||||
if [[ $? = 0 ]]; then
|
||||
non_term_print_line "Docker" "running"
|
||||
else
|
||||
non_term_print_line "Docker" "exited"
|
||||
fi
|
||||
|
||||
populate_container_lists
|
||||
|
||||
printf "\n"
|
||||
printf "Checking container statuses\n\n"
|
||||
|
||||
local num_containers=${#container_name_list[@]}
|
||||
|
||||
for i in $(seq 0 $(($num_containers - 1 ))); do
|
||||
non_term_print_line ${container_name_list[$i]} ${container_state_list[$i]}
|
||||
done
|
||||
|
||||
printf "\n"
|
||||
|
||||
# else if running from a terminal
|
||||
else
|
||||
|
||||
local focus_color="\e[1;34m"
|
||||
printf "\n"
|
||||
printf "${focus_color}%b\e[0m" "Checking Docker status\n\n"
|
||||
|
||||
systemctl is-active --quiet docker
|
||||
if [[ $? = 0 ]]; then
|
||||
print_line "Docker" "running"
|
||||
else
|
||||
print_line "Docker" "exited"
|
||||
fi
|
||||
|
||||
populate_container_lists
|
||||
|
||||
printf "\n"
|
||||
printf "${focus_color}%b\e[0m" "Checking container statuses\n\n"
|
||||
|
||||
local num_containers=${#container_name_list[@]}
|
||||
|
||||
for i in $(seq 0 $(($num_containers - 1 ))); do
|
||||
print_line ${container_name_list[$i]} ${container_state_list[$i]}
|
||||
done
|
||||
|
||||
printf "\n"
|
||||
fi
|
||||
}
|
||||
|
||||
# {% endraw %}
|
||||
|
||||
|
||||
main
|
||||
main
|
||||
@@ -19,11 +19,15 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
echo $banner
|
||||
printf "Stopping $1...\n"
|
||||
echo $banner
|
||||
if [ $# -ge 1 ]; then
|
||||
echo $banner
|
||||
printf "Stopping $1...\n"
|
||||
echo $banner
|
||||
|
||||
case $1 in
|
||||
*) docker stop so-$1 ; docker rm so-$1 ;;
|
||||
esac
|
||||
case $1 in
|
||||
*) docker stop so-$1 ; docker rm so-$1 ;;
|
||||
esac
|
||||
else
|
||||
echo -e "\nPlease provide an argument by running like so-stop $component, or by using the component-specific script.\nEx. so-stop filebeat, or so-filebeat-stop\n"
|
||||
fi
|
||||
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Usage: so-tcpreplay "/opt/so/samples/*"
|
||||
# Usage: so-tcpreplay "/opt/samples/*"
|
||||
|
||||
REPLAY_ENABLED=$(docker images | grep so-tcpreplay)
|
||||
REPLAY_RUNNING=$(docker ps | grep so-tcpreplay)
|
||||
|
||||
43  salt/common/tools/sbin/so-test  Normal file
@@ -0,0 +1,43 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# Usage: so-test
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
REPLAY_ENABLED=$(docker images | grep so-tcpreplay)
|
||||
REPLAY_RUNNING=$(docker ps | grep so-tcpreplay)
|
||||
|
||||
if [ "$REPLAY_ENABLED" != "" ] && [ "$REPLAY_RUNNING" != "" ]; then
|
||||
docker cp so-tcpreplay:/opt/samples /opt/samples
|
||||
docker exec -it so-tcpreplay /usr/local/bin/tcpreplay -i bond0 -M10 /opt/samples/*
|
||||
echo
|
||||
echo "PCAP's have been replayed - it is normal to see some warnings."
|
||||
echo
|
||||
else
|
||||
echo "Replay functionality not enabled! Enabling Now...."
|
||||
echo
|
||||
echo "Note that you will need internet access to download the appropriate components"
|
||||
/usr/sbin/so-start tcpreplay
|
||||
echo "Replay functionality enabled. Replaying PCAPs Now...."
|
||||
docker cp so-tcpreplay:/opt/samples /opt/samples
|
||||
docker exec -it so-tcpreplay /usr/local/bin/tcpreplay -i bond0 -M10 /opt/samples/*
|
||||
echo
|
||||
echo "PCAP's have been replayed - it is normal to see some warnings."
|
||||
echo
|
||||
fi
|
||||
|
||||
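so-test replays the bundled samples over bond0, so a sensor should begin producing logs shortly afterwards; a typical run might look like this (the status check is an assumption):
```
sudo so-test       # enables so-tcpreplay if needed, then replays /opt/samples/*
sudo so-status     # confirm the sensor containers stay healthy during the replay
```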
52  salt/common/tools/sbin/so-thehive-user-add  Executable file
@@ -0,0 +1,52 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 <new-user-name>"
|
||||
echo ""
|
||||
echo "Adds a new user to TheHive. The new password will be read from STDIN."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
USER=$1
|
||||
|
||||
THEHIVE_KEY=$(lookup_pillar hivekey)
|
||||
THEHIVE_IP=$(lookup_pillar managerip)
|
||||
THEHIVE_USER=$USER
|
||||
|
||||
# Read password for new user from stdin
|
||||
test -t 0
|
||||
if [[ $? == 0 ]]; then
|
||||
echo "Enter new password:"
|
||||
fi
|
||||
read -s THEHIVE_PASS
|
||||
|
||||
# Create new user in TheHive
|
||||
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
|
||||
if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
|
||||
echo "Successfully added user to TheHive."
|
||||
else
|
||||
echo "Unable to add user to TheHive; user might already exist."
|
||||
echo $resp
|
||||
exit 2
|
||||
fi
|
||||
57  salt/common/tools/sbin/so-thehive-user-enable  Executable file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 <user-name> <true|false>"
|
||||
echo ""
|
||||
echo "Enables or disables a user in thehive."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ $# -ne 2 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
USER=$1
|
||||
|
||||
THEHIVE_KEY=$(lookup_pillar hivekey)
|
||||
THEHIVE_IP=$(lookup_pillar managerip)
|
||||
THEHIVE_USER=$USER
|
||||
|
||||
case "${2^^}" in
|
||||
FALSE | NO | 0)
|
||||
THEHIVE_STATUS=Locked
|
||||
;;
|
||||
TRUE | YES | 1)
|
||||
THEHIVE_STATUS=Ok
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
;;
|
||||
esac
|
||||
|
||||
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
|
||||
if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
|
||||
echo "Successfully updated user in thehive."
|
||||
else
|
||||
echo "Failed to update user in thehive."
|
||||
echo "$resp"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
@@ -8,26 +8,16 @@
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
got_root() {
|
||||
|
||||
# Make sure you are root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "This script must be run using sudo!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
# Make sure the user is root
|
||||
got_root
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [[ $# < 1 || $# > 2 ]]; then
|
||||
echo "Usage: $0 <list|add|update|delete|validate|valemail|valpass> [email]"
|
||||
echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
|
||||
echo ""
|
||||
echo " list: Lists all user email addresses currently defined in the identity system"
|
||||
echo " add: Adds a new user to the identity system; requires 'email' parameter"
|
||||
echo " update: Updates a user's password; requires 'email' parameter"
|
||||
echo " delete: Deletes an existing user; requires 'email' parameter"
|
||||
echo " enable: Enables a user; requires 'email' parameter"
|
||||
echo " disable: Disables a user; requires 'email' parameter"
|
||||
echo " validate: Validates that the given email address and password are acceptable for defining a new user; requires 'email' parameter"
|
||||
echo " valemail: Validates that the given email address is acceptable for defining a new user; requires 'email' parameter"
|
||||
echo " valpass: Validates that a password is acceptable for defining a new user"
|
||||
@@ -74,7 +64,7 @@ function findIdByEmail() {
|
||||
email=$1
|
||||
|
||||
response=$(curl -Ss ${kratosUrl}/identities)
|
||||
identityId=$(echo "${response}" | jq ".[] | select(.addresses[0].value == \"$email\") | .id")
|
||||
identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
|
||||
echo $identityId
|
||||
}
|
||||
|
||||
@@ -124,7 +114,7 @@ function listUsers() {
|
||||
response=$(curl -Ss ${kratosUrl}/identities)
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
echo "${response}" | jq -r ".[] | .addresses[0].value" | sort
|
||||
echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
|
||||
}
|
||||
|
||||
function createUser() {
|
||||
@@ -133,17 +123,8 @@ function createUser() {
|
||||
now=$(date -u +%FT%TZ)
|
||||
addUserJson=$(cat <<EOF
|
||||
{
|
||||
"addresses": [
|
||||
{
|
||||
"expires_at": "2099-01-31T12:00:00Z",
|
||||
"value": "${email}",
|
||||
"verified": true,
|
||||
"verified_at": "${now}",
|
||||
"via": "so-add-user"
|
||||
}
|
||||
],
|
||||
"traits": {"email":"${email}"},
|
||||
"traits_schema_id": "default"
|
||||
"schema_id": "default"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
@@ -163,6 +144,36 @@ EOF
|
||||
updatePassword $identityId
|
||||
}
|
||||
|
||||
function updateStatus() {
|
||||
email=$1
|
||||
status=$2
|
||||
|
||||
identityId=$(findIdByEmail "$email")
|
||||
[[ ${identityId} == "" ]] && fail "User not found"
|
||||
|
||||
response=$(curl -Ss "${kratosUrl}/identities/$identityId")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
|
||||
if [[ "$status" == "locked" ]]; then
|
||||
config=$(echo $oldConfig | sed -e 's/hashed/locked/')
|
||||
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to lock credential record"
|
||||
|
||||
echo "delete from sessions where identity_id=${identityId};" | sqlite3 "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to invalidate sessions"
|
||||
else
|
||||
config=$(echo $oldConfig | sed -e 's/locked/hashed/')
|
||||
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to unlock credential record"
|
||||
fi
|
||||
|
||||
updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
|
||||
response=$(curl -Ss -XPUT ${kratosUrl}/identities/$identityId -d "$updatedJson")
|
||||
[[ $? != 0 ]] && fail "Unable to mark user as locked"
|
||||
|
||||
}
|
||||
|
||||
function updateUser() {
|
||||
email=$1
|
||||
|
||||
@@ -189,7 +200,9 @@ case "${operation}" in
|
||||
|
||||
validateEmail "$email"
|
||||
createUser "$email"
|
||||
echo "Successfully added new user"
|
||||
echo "Successfully added new user to SOC"
|
||||
check_container thehive && echo $password | so-thehive-user-add "$email"
|
||||
check_container fleet && echo $password | so-fleet-user-add "$email"
|
||||
;;
|
||||
|
||||
"list")
|
||||
@@ -205,12 +218,34 @@ case "${operation}" in
|
||||
echo "Successfully updated user"
|
||||
;;
|
||||
|
||||
"enable")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
updateStatus "$email" 'active'
|
||||
echo "Successfully enabled user"
|
||||
check_container thehive && so-thehive-user-enable "$email" true
|
||||
check_container fleet && so-fleet-user-enable "$email" true
|
||||
;;
|
||||
|
||||
"disable")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
updateStatus "$email" 'locked'
|
||||
echo "Successfully disabled user"
|
||||
check_container thehive && so-thehive-user-enable "$email" false
|
||||
check_container fleet && so-fleet-user-enable "$email" false
|
||||
;;
|
||||
|
||||
"delete")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
deleteUser "$email"
|
||||
echo "Successfully deleted user"
|
||||
echo "Successfully deleted user"
|
||||
check_container thehive && so-thehive-user-enable "$email" false
|
||||
check_container fleet && so-fleet-user-enable "$email" false
|
||||
;;
|
||||
|
||||
"validate")
|
||||
|
||||
2  salt/common/tools/sbin/so-user-disable  Executable file
@@ -0,0 +1,2 @@
|
||||
#!/bin/bash
|
||||
so-user disable $*
|
||||
2  salt/common/tools/sbin/so-user-enable  Executable file
@@ -0,0 +1,2 @@
|
||||
#!/bin/bash
|
||||
so-user enable $*
|
||||
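A usage sketch for the new wrappers; the address is hypothetical, and TheHive/Fleet accounts are only touched when those containers are running, per the check_container calls above:
```
sudo so-user-disable analyst@example.com    # locks the SOC login and disables the TheHive/Fleet users
sudo so-user-enable analyst@example.com     # unlocks them again
```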
22  salt/common/tools/sbin/so-wazuh-agent-manage  Executable file
@@ -0,0 +1,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
if docker ps |grep so-wazuh >/dev/null 2>&1; then
|
||||
docker exec -it so-wazuh /var/ossec/bin/manage_agents "$@"
|
||||
else
|
||||
echo "Wazuh manager is not running. Please start it with so-wazuh-start."
|
||||
fi
|
||||
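Arguments are passed straight through to manage_agents inside the so-wazuh container; a usage sketch (the -l flag is manage_agents' own list option):
```
sudo so-wazuh-agent-manage        # interactive menu to add/extract/remove agent keys
sudo so-wazuh-agent-manage -l     # non-interactive: list registered agents
```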
22  salt/common/tools/sbin/so-wazuh-agent-upgrade  Executable file
@@ -0,0 +1,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
if docker ps |grep so-wazuh >/dev/null 2>&1; then
|
||||
docker exec -it so-wazuh /var/ossec/bin/agent_upgrade "$@"
|
||||
else
|
||||
echo "Wazuh manager is not running. Please start it with so-wazuh-start."
|
||||
fi
|
||||
@@ -14,10 +14,10 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
|
||||
|
||||
clone_dir="/tmp"
|
||||
output_dir="/opt/so/saltstack/default/salt/strelka/rules"
|
||||
#mkdir -p $output_dir
|
||||
mkdir -p $output_dir
|
||||
repos="$output_dir/repos.txt"
|
||||
ignorefile="$output_dir/ignore.txt"
|
||||
|
||||
@@ -25,8 +25,70 @@ deletecounter=0
|
||||
newcounter=0
|
||||
updatecounter=0
|
||||
|
||||
gh_status=$(curl -s -o /dev/null -w "%{http_code}" http://github.com)
|
||||
{% if ISAIRGAP is sameas true %}
|
||||
|
||||
|
||||
clone_dir="/nsm/repo/rules/strelka"
|
||||
repo_name="signature-base"
|
||||
mkdir -p /opt/so/saltstack/default/salt/strelka/rules/signature-base
|
||||
|
||||
[ -f $clone_dir/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
|
||||
|
||||
# Copy over rules
|
||||
for i in $(find $clone_dir/yara -name "*.yar*"); do
|
||||
rule_name=$(echo $i | awk -F '/' '{print $NF}')
|
||||
repo_sum=$(sha256sum $i | awk '{print $1}')
|
||||
|
||||
# Check rules against those in ignore list -- don't copy if ignored.
|
||||
if ! grep -iq $rule_name $ignorefile; then
|
||||
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
|
||||
|
||||
# For existing rules, check to see if they need to be updated, by comparing checksums
|
||||
if [ $existing_rules -gt 0 ];then
|
||||
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
|
||||
if [ "$repo_sum" != "$local_sum" ]; then
|
||||
echo "Checksums do not match!"
|
||||
echo "Updating $rule_name..."
|
||||
cp $i $output_dir/$repo_name;
|
||||
((updatecounter++))
|
||||
fi
|
||||
else
|
||||
# If rule doesn't exist already, we'll add it
|
||||
echo "Adding new rule: $rule_name..."
|
||||
cp $i $output_dir/$repo_name
|
||||
((newcounter++))
|
||||
fi
|
||||
fi;
|
||||
done
|
||||
|
||||
# Check to see if we have any old rules that need to be removed
|
||||
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
|
||||
is_repo_rule=$(find $clone_dir -name "$i" | wc -l)
|
||||
if [ $is_repo_rule -eq 0 ]; then
|
||||
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
|
||||
rm $output_dir/$repo_name/$i
|
||||
((deletecounter++))
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Done!"
|
||||
|
||||
if [ "$newcounter" -gt 0 ];then
|
||||
echo "$newcounter new rules added."
|
||||
fi
|
||||
|
||||
if [ "$updatecounter" -gt 0 ];then
|
||||
echo "$updatecounter rules updated."
|
||||
fi
|
||||
|
||||
if [ "$deletecounter" -gt 0 ];then
|
||||
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
|
||||
fi
|
||||
|
||||
{% else %}
|
||||
|
||||
gh_status=$(curl -s -o /dev/null -w "%{http_code}" http://github.com)
|
||||
clone_dir="/tmp"
|
||||
if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
|
||||
|
||||
while IFS= read -r repo; do
|
||||
@@ -68,7 +130,7 @@ if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
|
||||
fi
|
||||
fi;
|
||||
done
|
||||
|
||||
|
||||
# Check to see if we have any old rules that need to be removed
|
||||
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
|
||||
is_repo_rule=$(find $clone_dir/$repo_name -name "$i" | wc -l)
|
||||
@@ -100,3 +162,4 @@ else
|
||||
echo "No connectivity to Github...exiting..."
|
||||
exit 1
|
||||
fi
|
||||
{%- endif -%}
|
||||
@@ -38,7 +38,8 @@ manager_check() {

clean_dockers() {
# Place Holder for cleaning up old docker images
echo ""
echo "Trying to clean up old dockers."
docker system prune -a -f
}

clone_to_tmp() {
@@ -121,45 +122,80 @@ pillar_changes() {
# This function is to add any new pillar items if needed.
echo "Checking to see if pillar changes are needed."

# Move baseurl in global.sls
if [[ "$INSTALLEDVERSION" =~ rc.1 ]]; then
# Move the static file to global.sls
echo "Migrating static.sls to global.sls"
mv -v /opt/so/saltstack/local/pillar/static.sls /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
sed -i '1c\global:' /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
[[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
[[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3

# Moving baseurl from minion sls file to inside global.sls
local line=$(grep '^ url_base:' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls)
sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls;
sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls;
}

# Adding play values to the global.sls
local HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
local CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
sed -i "/^global:/a \\ hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
sed -i "/^global:/a \\ cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
rc1_to_rc2() {

# Move storage nodes to hostname for SSL
# Get a list we can use:
grep -A1 searchnode /opt/so/saltstack/local/pillar/data/nodestab.sls | grep -v '\-\-' | sed '$!N;s/\n/ /' | awk '{print $1,$3}' | awk '/_searchnode:/{gsub(/\_searchnode:/, "_searchnode"); print}' >/tmp/nodes.txt
# Remove the nodes from cluster settings
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Removing the old cross cluster config for $NAME"
curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_cluster/settings -d '{"persistent":{"cluster":{"remote":{"'$NAME'":{"skip_unavailable":null,"seeds":null}}}}}'
done </tmp/nodes.txt
# Add the nodes back using hostname
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
done </tmp/nodes.txt
# Move the static file to global.sls
echo "Migrating static.sls to global.sls"
mv -v /opt/so/saltstack/local/pillar/static.sls /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
sed -i '1c\global:' /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1

# Moving baseurl from minion sls file to inside global.sls
local line=$(grep '^ url_base:' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls)
sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls;
sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls;

fi
# Adding play values to the global.sls
local HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
local CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
sed -i "/^global:/a \\ hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
sed -i "/^global:/a \\ cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;

# Move storage nodes to hostname for SSL
# Get a list we can use:
grep -A1 searchnode /opt/so/saltstack/local/pillar/data/nodestab.sls | grep -v '\-\-' | sed '$!N;s/\n/ /' | awk '{print $1,$3}' | awk '/_searchnode:/{gsub(/\_searchnode:/, "_searchnode"); print}' >/tmp/nodes.txt
# Remove the nodes from cluster settings
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Removing the old cross cluster config for $NAME"
curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_cluster/settings -d '{"persistent":{"cluster":{"remote":{"'$NAME'":{"skip_unavailable":null,"seeds":null}}}}}'
done </tmp/nodes.txt
# Add the nodes back using hostname
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
done </tmp/nodes.txt

INSTALLEDVERSION=rc.2

}
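rc1_to_rc2() rewrites each search node's cross cluster search entry through the Elasticsearch `_cluster/settings` API: the first loop nulls out the old IP-based seeds, the second re-adds each remote by hostname so the seeds work with SSL. A quick way to eyeball the result after an upgrade (an illustrative check, not part of soup; jq is only used here for readability) is:

```
# List the configured cross cluster remotes and their seeds/connection state.
curl -s http://localhost:9200/_remote/info | jq .
# Review the persistent settings the two while-loops above modified.
curl -s 'http://localhost:9200/_cluster/settings?filter_path=persistent' | jq .
```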
rc2_to_rc3() {

# move location of local.rules
cp /opt/so/saltstack/default/salt/idstools/localrules/local.rules /opt/so/saltstack/local/salt/idstools/local.rules

if [ -f /opt/so/saltstack/local/salt/idstools/localrules/local.rules ]; then
cat /opt/so/saltstack/local/salt/idstools/localrules/local.rules >> /opt/so/saltstack/local/salt/idstools/local.rules
fi
rm -rf /opt/so/saltstack/local/salt/idstools/localrules
rm -rf /opt/so/saltstack/default/salt/idstools/localrules

# Rename mdengine to MDENGINE
sed -i "s/ zeekversion/ mdengine/g" /opt/so/saltstack/local/pillar/global.sls
# Enable Strelka Rules
sed -i "/ rules:/c\ rules: 1" /opt/so/saltstack/local/pillar/global.sls

}

space_check() {
# Check to see if there is enough space
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
if [ "$CURRENTSPACE" -lt "10" ]; then
echo "You are low on disk space. Upgrade will try and clean up space.";
clean_dockers
else
echo "Plenty of space for upgrading"
fi

}
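space_check() derives the free space on `/` in whole gigabytes: `df -BG` prints sizes with a trailing `G`, `grep -v Avail` drops the header row, `awk '{print $4}'` keeps the Avail column, and `sed 's/.$//'` strips the unit so the value can be compared numerically against 10. A standalone sketch of the same parsing (the sample df output is made up):

```
# Print free space on / as a bare number of gigabytes, same pipeline as space_check().
df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//'
# e.g. if df -BG / reports "/dev/sda1 200G 150G 50G 75% /", the pipeline prints "50".
```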
update_dockers() {
@@ -245,7 +281,7 @@ update_version() {
# Update the version to the latest
echo "Updating the Security Onion version file."
echo $NEWVERSION > /etc/soversion
sed -i "s/$INSTALLEDVERSION/$NEWVERSION/g" /opt/so/saltstack/local/pillar/global.sls
sed -i "/ soversion:/c\ soversion: $NEWVERSION" /opt/so/saltstack/local/pillar/global.sls
}

upgrade_check() {
@@ -343,7 +379,7 @@ echo ""

echo "Let's see if we need to update Security Onion."
upgrade_check

space_check

echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
@@ -362,8 +398,6 @@ echo "Making pillar changes."
pillar_changes
echo ""

echo "Cleaning up old dockers."
clean_dockers
echo ""
echo "Updating dockers to $NEWVERSION."
update_dockers
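update_version() uses sed's change command: `/ soversion:/c\ ...` replaces the entire line matching ` soversion:` with the new value, rather than editing part of it. A throwaway demonstration of that behaviour (the temporary file and versions are made up):

```
# Demo of the "c\" (change whole line) sed command used by update_version().
printf 'global:\n soversion: 2.2.0-rc2\n' > /tmp/soup-demo.sls
sed -i "/ soversion:/c\ soversion: 2.2.0-rc3" /tmp/soup-demo.sls
cat /tmp/soup-demo.sls   # the soversion line has been replaced wholesale with the rc3 value
```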
@@ -1,3 +1,8 @@
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}

{% if 'curator' in top_states %}

{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
@@ -131,3 +136,11 @@ so-curator:

# End Curator Cron Jobs
{% endif %}

{% else %}

curator_state_not_allowed:
test.fail_without_changes:
- name: curator_state_not_allowed

{% endif %}
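The header added to this state (and repeated across the states below) only lets the state run when it is actually assigned to the minion in the top file; otherwise it fails fast with `test.fail_without_changes`. If a gated state unexpectedly refuses to apply, the assignment can be checked from the minion itself (illustrative commands, run as root):

```
# Show which states the top file assigns to this minion ...
salt-call state.show_top
# ... then apply a single gated state to see the *_state_not_allowed message.
salt-call state.apply curator
```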
@@ -1,3 +1,8 @@
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}

{% if 'docker' in top_states %}

installdocker:
pkg.installed:
- name: docker-ce
@@ -5,4 +10,12 @@ installdocker:
# Make sure Docker is running!
docker:
service.running:
- enable: True
- enable: True

{% else %}

docker_state_not_allowed:
test.fail_without_changes:
- name: docker_state_not_allowed

{% endif %}
@@ -12,6 +12,10 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}

{% if 'domainstats' in top_states %}

{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}

@@ -51,3 +55,11 @@ so-domainstats:
- user: domainstats
- binds:
- /opt/so/log/domainstats:/var/log/domain_stats

{% else %}

domainstats_state_not_allowed:
test.fail_without_changes:
- name: domainstats_state_not_allowed

{% endif %}
@@ -35,7 +35,7 @@ hive_alert_config:
title: '{match[rule][name]}'
type: 'NIDS'
source: 'SecurityOnion'
description: "`SOC Hunt Pivot:` \n\n <https://{{MANAGER}}/#/hunt?q=network.community_id%3A%20%20%22{match[network][community_id]}%22%20%7C%20groupby%20source.ip%20destination.ip,event.module,%20event.dataset> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MANAGER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'*:so-*',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
description: "`SOC Hunt Pivot:` \n\n <https://{{MANAGER}}/#/hunt?q=network.community_id%3A%20%20%22{match[network][community_id]}%22%20%7C%20groupby%20source.ip%20destination.ip,event.module,%20event.dataset> \n\n `Kibana Dashboard Pivot:` \n\n <https://{{MANAGER}}/kibana/app/kibana#/dashboard/30d0ac90-729f-11ea-8dd2-9d8795a1200b?_g=(filters:!(('$state':(store:globalState),meta:(alias:!n,disabled:!f,index:'2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29',key:network.community_id,negate:!f,params:(query:'{match[network][community_id]}'),type:phrase),query:(match_phrase:(network.community_id:'{match[network][community_id]}')))),refreshInterval:(pause:!t,value:0),time:(from:now-7d,to:now))> \n\n `IPs: `{match[source][ip]}:{match[source][port]} --> {match[destination][ip]}:{match[destination][port]} \n\n `Signature:`{match[rule][rule]}"
severity: 2
tags: ['{match[rule][uuid]}','{match[source][ip]}','{match[destination][ip]}']
tlp: 3
@@ -12,9 +12,16 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}

{% if 'elastalert' in top_states %}

{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
{%- set MANAGER_URL = salt['pillar.get']('global:url_base', '') %}
{%- set MANAGER_IP = salt['pillar.get']('global:managerip', '') %}

{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone'] %}
{% set esalert = salt['pillar.get']('manager:elastalert', '1') %}
@@ -100,6 +107,12 @@ elastaconf:
- group: 933
- template: jinja

wait_for_elasticsearch:
module.run:
- http.wait_for_successful_query:
- url: 'http://{{MANAGER}}:9200/_cat/indices/.kibana*'
- wait_for: 180

so-elastalert:
docker_container.running:
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elastalert:{{ VERSION }}
@@ -112,5 +125,16 @@ so-elastalert:
- /opt/so/log/elastalert:/var/log/elastalert:rw
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/config/elastalert_config.yaml:ro

- extra_hosts:
- {{MANAGER_URL}}:{{MANAGER_IP}}
- require:
- module: wait_for_elasticsearch
{% endif %}

{% else %}

elastalert_state_not_allowed:
test.fail_without_changes:
- name: elastalert_state_not_allowed

{% endif %}
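The new `wait_for_elasticsearch` requisite keeps so-elastalert from starting until the `.kibana` indices answer on port 9200, using Salt's `http.wait_for_successful_query` via `module.run`. When troubleshooting, the same wait can be approximated from a shell (a rough sketch, not part of the state; it assumes MANAGER holds the manager hostname):

```
# Poll Elasticsearch until the .kibana indices respond, or give up after ~180 seconds.
for i in $(seq 1 36); do
  if curl -s -o /dev/null -f "http://${MANAGER}:9200/_cat/indices/.kibana*"; then
    echo "Elasticsearch is up"; break
  fi
  sleep 5
done
```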
salt/elasticsearch/files/ingest/import.wel (new file, 9 lines)
@@ -0,0 +1,9 @@
{
"description" : "import.wel",
"processors" : [
{ "remove": { "field": ["event.created","timestamp", "winlog.event_data.UtcTime", "event_record_id"], "ignore_failure": true } },
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
{ "pipeline": { "name": "common" } }
]
}
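The new import.wel pipeline strips a few redundant timestamp fields and then routes imported Windows event log documents to either the sysmon or the win.eventlogs pipeline depending on `winlog.channel`, finishing with the common pipeline. Once the referenced pipelines exist on a running cluster, routing like this can be exercised with the ingest simulate API (the sample document and localhost endpoint are only for illustration):

```
curl -s -XPOST "http://localhost:9200/_ingest/pipeline/import.wel/_simulate" \
  -H 'Content-Type: application/json' -d '
{
  "docs": [
    { "_source": { "winlog": { "channel": "Microsoft-Windows-Sysmon/Operational", "event_id": 1 } } }
  ]
}'
```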
@@ -4,6 +4,8 @@
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.module", "value": "windows_eventlog", "override": false, "ignore_failure": true } },
{ "set": { "if": "ctx.winlog?.channel != null", "field": "event.dataset", "value": "{{winlog.channel}}", "override": true } },
{ "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } },
{ "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } },
{ "set": { "field": "event.category", "value": "host", "override": true } },
{ "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
{ "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }
]
@@ -3,6 +3,7 @@
"processors" : [
{ "remove": { "field": ["host"], "ignore_failure": true } },
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "dot_expander": { "field": "seen.indicator", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.seen.indicator", "target_field": "intel.indicator", "ignore_missing": true } },
{ "dot_expander": { "field": "seen.indicator_type", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.seen.indicator_type", "target_field": "intel.indicator_type", "ignore_missing": true } },
@@ -12,6 +12,11 @@
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
{% set show_top = salt['state.show_top']() %}
{% set top_states = show_top.values() | join(', ') %}

{% if 'elasticsearch' in top_states %}

{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
@@ -232,9 +237,17 @@ so-elasticsearch-pipelines:
- file: esyml
- file: so-elasticsearch-pipelines-file

{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone'] and TEMPLATES %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-import'] and TEMPLATES %}
so-elasticsearch-templates:
cmd.run:
- name: /usr/sbin/so-elasticsearch-templates
- cwd: /opt/so
{% endif %}
{% endif %}

{% else %}

elasticsearch_state_not_allowed:
test.fail_without_changes:
- name: elasticsearch_state_not_allowed

{% endif %}

@@ -6,11 +6,61 @@
|
||||
"number_of_replicas":0,
|
||||
"number_of_shards":1,
|
||||
"index.refresh_interval":"30s",
|
||||
"index.routing.allocation.require.box_type":"hot"
|
||||
"index.routing.allocation.require.box_type":"hot",
|
||||
"analysis": {
|
||||
"analyzer": {
|
||||
"es_security_analyzer": {
|
||||
"type": "custom",
|
||||
"filter": [ "path_hierarchy_pattern_filter", "lowercase" ],
|
||||
"tokenizer": "whitespace"
|
||||
},
|
||||
"es_security_search_analyzer": {
|
||||
"type": "custom",
|
||||
"filter": [ "lowercase" ],
|
||||
"tokenizer": "whitespace"
|
||||
},
|
||||
"es_security_search_quote_analyzer": {
|
||||
"type": "custom",
|
||||
"filter": [ "lowercase" ],
|
||||
"tokenizer": "whitespace"
|
||||
}
|
||||
},
|
||||
"filter" : {
|
||||
"path_hierarchy_pattern_filter": {
|
||||
"type" : "pattern_capture",
|
||||
"preserve_original": true,
|
||||
"patterns": [
|
||||
"((?:[^\\\\]*\\\\)*)(.*)",
|
||||
"((?:[^/]*/)*)(.*)"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"mappings":{
|
||||
"dynamic":false,
|
||||
"date_detection":false,
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"strings": {
|
||||
"match_mapping_type": "string",
|
||||
"mapping": {
|
||||
"type": "text",
|
||||
"fields": {
|
||||
"keyword": {
|
||||
"type": "keyword"
|
||||
},
|
||||
"security": {
|
||||
"type": "text",
|
||||
"analyzer": "es_security_analyzer",
|
||||
"search_analyzer": "es_security_search_analyzer",
|
||||
"search_quote_analyzer": "es_security_search_quote_analyzer"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"properties":{
|
||||
"@timestamp":{
|
||||
"type":"date"
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
|
||||
|
||||
{%- set HOSTNAME = salt['grains.get']('host', '') %}
|
||||
{%- set ZEEKVER = salt['pillar.get']('global:zeekversion', 'COMMUNITY') %}
|
||||
{%- set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %}
|
||||
{%- set WAZUHENABLED = salt['pillar.get']('global:wazuh', '0') %}
|
||||
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
|
||||
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
|
||||
|
||||
@@ -11,6 +11,11 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'filebeat' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
@@ -69,3 +74,11 @@ so-filebeat:
|
||||
- 0.0.0.0:514:514/udp
|
||||
- watch:
|
||||
- file: /opt/so/conf/filebeat/etc/filebeat.yml
|
||||
|
||||
{% else %}
|
||||
|
||||
filebeat_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: filebeat_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,3 +1,4 @@
|
||||
{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
|
||||
{% import_yaml 'firewall/portgroups.yaml' as portgroups %}
|
||||
{% set portgroups = portgroups.firewall.aliases.ports %}
|
||||
|
||||
@@ -109,6 +110,9 @@ role:
|
||||
- {{ portgroups.elasticsearch_node }}
|
||||
- {{ portgroups.cortex_es_rest }}
|
||||
- {{ portgroups.cortex_es_node }}
|
||||
{% if ISAIRGAP is sameas true %}
|
||||
- {{ portgroups.agrules }}
|
||||
{% endif %}
|
||||
minion:
|
||||
portgroups:
|
||||
- {{ portgroups.acng }}
|
||||
@@ -117,6 +121,9 @@ role:
|
||||
- {{ portgroups.influxdb }}
|
||||
- {{ portgroups.wazuh_api }}
|
||||
- {{ portgroups.fleet_api }}
|
||||
{% if ISAIRGAP is sameas true %}
|
||||
- {{ portgroups.yum }}
|
||||
{% endif %}
|
||||
sensor:
|
||||
portgroups:
|
||||
- {{ portgroups.sensoroni }}
|
||||
@@ -200,6 +207,7 @@ role:
|
||||
- {{ portgroups.influxdb }}
|
||||
- {{ portgroups.wazuh_api }}
|
||||
- {{ portgroups.fleet_api }}
|
||||
- {{ portgroups.yum }}
|
||||
sensor:
|
||||
portgroups:
|
||||
- {{ portgroups.sensoroni }}
|
||||
@@ -282,7 +290,8 @@ role:
|
||||
- {{ portgroups.osquery_8080 }}
|
||||
- {{ portgroups.influxdb }}
|
||||
- {{ portgroups.wazuh_api }}
|
||||
- {{ portgroups.fleet_api }}
|
||||
- {{ portgroups.fleet_api }}
|
||||
- {{ portgroups.yum }}
|
||||
sensor:
|
||||
portgroups:
|
||||
- {{ portgroups.sensoroni }}
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'firewall' in top_states %}
|
||||
|
||||
# Firewall Magic for the grid
|
||||
{% from 'firewall/map.jinja' import hostgroups with context %}
|
||||
{% from 'firewall/map.jinja' import assigned_hostgroups with context %}
|
||||
@@ -128,3 +133,11 @@ iptables_drop_all_the_things:
|
||||
- chain: LOGGING
|
||||
- jump: DROP
|
||||
- save: True
|
||||
|
||||
{% else %}
|
||||
|
||||
firewall_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: firewall_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -9,6 +9,9 @@ firewall:
|
||||
acng:
|
||||
tcp:
|
||||
- 3142
|
||||
agrules:
|
||||
tcp:
|
||||
- 7788
|
||||
beats_5044:
|
||||
tcp:
|
||||
- 5044
|
||||
@@ -94,3 +97,6 @@ firewall:
|
||||
wazuh_authd:
|
||||
tcp:
|
||||
- 1515
|
||||
yum:
|
||||
tcp:
|
||||
- 443
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{% set ENROLLSECRET = salt['cmd.run']('docker exec so-fleet fleetctl get enroll-secret') %}
|
||||
{% set ENROLLSECRET = salt['cmd.run']('docker exec so-fleet fleetctl get enroll-secret default') %}
|
||||
{% set MAININT = salt['pillar.get']('host:mainint') %}
|
||||
{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %}
|
||||
|
||||
|
||||
@@ -132,4 +132,4 @@ so-fleet:
|
||||
- watch:
|
||||
- /opt/so/conf/fleet/etc
|
||||
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
@@ -12,6 +12,10 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'freqserver' in top_states %}
|
||||
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
|
||||
@@ -52,3 +56,11 @@ so-freq:
|
||||
- binds:
|
||||
- /opt/so/log/freq_server:/var/log/freq_server:rw
|
||||
|
||||
{% else %}
|
||||
|
||||
freqserver_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: freqserver_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
|
||||
|
||||
@@ -3860,70 +3860,6 @@
|
||||
"value": "{{ MONINT }}"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"alias": "Outbound",
|
||||
"dsType": "influxdb",
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$Interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "net",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"bytes_sent"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"1s"
|
||||
],
|
||||
"type": "derivative"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"*8"
|
||||
],
|
||||
"type": "math"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "host",
|
||||
"operator": "=",
|
||||
"value": "{{ SERVERNAME }}"
|
||||
},
|
||||
{
|
||||
"condition": "AND",
|
||||
"key": "interface",
|
||||
"operator": "=",
|
||||
"value": "{{ MONINT }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
|
||||
@@ -583,7 +583,7 @@
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - REDIS Unparsed Queue",
|
||||
"title": "{{ SERVERNAME }} - Redis Queue",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
@@ -621,134 +621,6 @@
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"cacheTimeout": null,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "InfluxDB",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 4,
|
||||
"x": 20,
|
||||
"y": 0
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 21,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": false,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"links": [],
|
||||
"nullPointMode": "connected",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"dsType": "influxdb",
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$Interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "redisqueue",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"parsed"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "host",
|
||||
"operator": "=",
|
||||
"value": "{{ SERVERNAME }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - REDIS Parsed Queue",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"decimals": 0,
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": false
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "InfluxDB",
|
||||
@@ -1351,7 +1223,7 @@
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - REDIS CPU Usage",
|
||||
"title": "{{ SERVERNAME }} - Redis CPU Usage",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
@@ -1485,7 +1357,7 @@
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - REDIS Memory Usage",
|
||||
"title": "{{ SERVERNAME }} - Redis Memory Usage",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
@@ -4043,6 +3915,138 @@
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "InfluxDB",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 4,
|
||||
"x": 20,
|
||||
"y": 5
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 40,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": false,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "connected",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "influxsize",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"kbytes"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "host",
|
||||
"operator": "=",
|
||||
"value": "{{ SERVERNAME }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - InfluxDB Size",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:526",
|
||||
"format": "deckbytes",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:527",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": false
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"refresh": false,
|
||||
|
||||
@@ -588,7 +588,7 @@
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - REDIS Unparsed Queue",
|
||||
"title": "{{ SERVERNAME }} - Redis Queue",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
@@ -627,132 +627,6 @@
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "InfluxDB",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 4,
|
||||
"x": 20,
|
||||
"y": 0
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 51,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": false,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "connected",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "redisqueue",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"parsed"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "host",
|
||||
"operator": "=",
|
||||
"value": "{{ SERVERNAME }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - REDIS Parsed Queue",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:1367",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:1368",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": false
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"cacheTimeout": null,
|
||||
"datasource": "InfluxDB",
|
||||
@@ -1352,7 +1226,7 @@
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - REDIS CPU Usage",
|
||||
"title": "{{ SERVERNAME }} - Redis CPU Usage",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
@@ -1485,7 +1359,7 @@
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - REDIS Memory Usage",
|
||||
"title": "{{ SERVERNAME }} - Redis Memory Usage",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
@@ -4787,6 +4661,138 @@
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "InfluxDB",
|
||||
"fieldConfig": {
|
||||
"defaults": {
|
||||
"custom": {}
|
||||
},
|
||||
"overrides": []
|
||||
},
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 4,
|
||||
"x": 20,
|
||||
"y": 5
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 57,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": false,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "connected",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "influxsize",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"kbytes"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "host",
|
||||
"operator": "=",
|
||||
"value": "{{ SERVERNAME }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "{{ SERVERNAME }} - InfluxDB Size",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"$$hashKey": "object:140",
|
||||
"format": "deckbytes",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"$$hashKey": "object:141",
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": false
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
}
|
||||
],
|
||||
"refresh": false,
|
||||
|
||||
@@ -3420,70 +3420,6 @@
|
||||
"value": "{{ MONINT }}"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"alias": "OutBound",
|
||||
"dsType": "influxdb",
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$Interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "net",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "default",
|
||||
"query": "SELECT 8 * derivative(mean(\"bytes_sent\"),1s) FROM \"net\" WHERE \"host\" = 'JumpHost' AND \"interface\" = 'eth0' AND $timeFilter GROUP BY time($interval) fill(null)",
|
||||
"rawQuery": false,
|
||||
"refId": "B",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"bytes_sent"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "mean"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"1s"
|
||||
],
|
||||
"type": "derivative"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"*8"
|
||||
],
|
||||
"type": "math"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "host",
|
||||
"operator": "=",
|
||||
"value": "{{ SERVERNAME }}"
|
||||
},
|
||||
{
|
||||
"condition": "AND",
|
||||
"key": "interface",
|
||||
"operator": "=",
|
||||
"value": "{{ MONINT }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'grafana' in top_states %}
|
||||
|
||||
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
@@ -230,4 +235,12 @@ so-grafana:
|
||||
- watch:
|
||||
- file: /opt/so/conf/grafana/*
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
grafana_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: grafana_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'healthcheck' in top_states %}
|
||||
|
||||
{% set CHECKS = salt['pillar.get']('healthcheck:checks', {}) %}
|
||||
{% set ENABLED = salt['pillar.get']('healthcheck:enabled', False) %}
|
||||
{% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 300) %}
|
||||
@@ -23,3 +28,11 @@ healthcheck_schedule_{{ STATUS[0] }}:
|
||||
healthcheck_schedule_{{ STATUS[1] }}:
|
||||
schedule.{{ STATUS[1] }}:
|
||||
- name: healthcheck
|
||||
|
||||
{% else %}
|
||||
|
||||
healthcheck_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: healthcheck_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,21 +1,32 @@
|
||||
{%- set URLS = salt['pillar.get']('idstools:config:urls') -%}
|
||||
{%- set RULESET = salt['pillar.get']('idstools:config:ruleset') -%}
|
||||
{%- set OINKCODE = salt['pillar.get']('idstools:config:oinkcode', '' ) -%}
|
||||
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') -%}
|
||||
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') -%}
|
||||
{%- if ISAIRGAP is sameas true -%}
|
||||
--merged=/opt/so/rules/nids/all.rules
|
||||
--local=/opt/so/rules/nids/local.rules
|
||||
--url=http://{{ MANAGERIP }}:7788/rules/emerging-all.rules
|
||||
--disable=/opt/so/idstools/etc/disable.conf
|
||||
--enable=/opt/so/idstools/etc/enable.conf
|
||||
--modify=/opt/so/idstools/etc/modify.conf
|
||||
{%- else -%}
|
||||
--suricata-version=5.0
|
||||
--merged=/opt/so/rules/nids/all.rules
|
||||
--local=/opt/so/rules/nids/local.rules
|
||||
--disable=/opt/so/idstools/etc/disable.conf
|
||||
--enable=/opt/so/idstools/etc/enable.conf
|
||||
--modify=/opt/so/idstools/etc/modify.conf
|
||||
{%- if RULESET == 'ETOPEN' %}
|
||||
{%- if RULESET == 'ETOPEN' -%}
|
||||
--etopen
|
||||
{%- elif RULESET == 'ETPRO' %}
|
||||
{%- elif RULESET == 'ETPRO' -%}
|
||||
--etpro={{ OINKCODE }}
|
||||
{%- elif RULESET == 'TALOS' %}
|
||||
{%- elif RULESET == 'TALOS' -%}
|
||||
--url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ OINKCODE }}
|
||||
{%- endif %}
|
||||
{%- if URLS != None %}
|
||||
{%- for URL in URLS %}
|
||||
{%- endif -%}
|
||||
{%- endif -%}
|
||||
{%- if URLS != None -%}
|
||||
{%- for URL in URLS -%}
|
||||
--url={{ URL }}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
{%- endfor -%}
|
||||
{%- endif -%}
|
||||
|
||||
@@ -12,6 +12,11 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'idstools' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
@@ -55,7 +60,7 @@ rulesdir:
|
||||
synclocalnidsrules:
|
||||
file.managed:
|
||||
- name: /opt/so/rules/nids/local.rules
|
||||
- source: salt://idstools/localrules/local.rules
|
||||
- source: salt://idstools/local.rules
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
@@ -69,3 +74,11 @@ so-idstools:
|
||||
- /opt/so/rules/nids:/opt/so/rules/nids:rw
|
||||
- watch:
|
||||
- file: idstoolsetcsync
|
||||
|
||||
{% else %}
|
||||
|
||||
idstools_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: idstools_state_not_allowed
|
||||
|
||||
{% endif%}
|
||||
@@ -1 +0,0 @@
|
||||
# Put your own custom Snort/Suricata rules in /opt/so/saltstack/local/salt/idstools/localrules/.
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'influxdb' in top_states %}
|
||||
|
||||
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
@@ -40,4 +45,12 @@ so-influxdb:
|
||||
- watch:
|
||||
- file: influxdbconf
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
influxdb_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: influxdb_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -3,8 +3,6 @@
|
||||
# {%- set FLEET_NODE = salt['pillar.get']('global:fleet_node', False) -%}
|
||||
# {%- set MANAGER = salt['pillar.get']('global:url_base', '') %}
|
||||
|
||||
KIBANA_VERSION="7.6.1"
|
||||
|
||||
# Copy template file
|
||||
cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_objects.ndjson
|
||||
|
||||
@@ -17,4 +15,4 @@ cp /opt/so/conf/kibana/saved_objects.ndjson.template /opt/so/conf/kibana/saved_o
|
||||
sed -i "s/PLACEHOLDER/{{ MANAGER }}/g" /opt/so/conf/kibana/saved_objects.ndjson
|
||||
|
||||
# Load saved objects
|
||||
curl -X POST "localhost:5601/api/saved_objects/_import" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1
|
||||
curl -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/opt/so/conf/kibana/saved_objects.ndjson > /dev/null 2>&1
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'kibana' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
@@ -114,4 +119,12 @@ so-kibana-config-load:
|
||||
# - shell: /bin/bash
|
||||
# - runas: socore
|
||||
# - source: salt://kibana/bin/keepkibanahappy.sh
|
||||
# - template: jinja
|
||||
# - template: jinja
|
||||
|
||||
{% else %}
|
||||
|
||||
kibana_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: kibana_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -12,6 +12,11 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'logstash' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
@@ -195,3 +200,11 @@ so-logstash:
|
||||
{% for TEMPLATE in TEMPLATES %}
|
||||
- file: es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}
|
||||
{% endfor %}
|
||||
|
||||
{% else %}
|
||||
|
||||
logstash_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: logstash_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -12,10 +12,16 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'manager' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
{% set managerproxy = salt['pillar.get']('global:managerupdate', '0') %}
|
||||
{% set STRELKA_RULES = salt['pillar.get']('strelka:rules', '1') %}
|
||||
|
||||
socore_own_saltstack:
|
||||
file.directory:
|
||||
@@ -76,3 +82,17 @@ so-aptcacherng:
|
||||
- /opt/so/conf/aptcacher-ng/etc/acng.conf:/etc/apt-cacher-ng/acng.conf:ro
|
||||
|
||||
{% endif %}
|
||||
|
||||
strelka_yara_update:
|
||||
cron.present:
|
||||
- user: root
|
||||
- name: '/usr/sbin/so-yara-update > /dev/null 2>&1'
|
||||
- hour: '7'
|
||||
- minute: '1'
|
||||
{% else %}
|
||||
|
||||
manager_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: manager_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
|
||||
@@ -12,6 +12,10 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'minio' in top_states %}
|
||||
|
||||
{% set access_key = salt['pillar.get']('minio:access_key', '') %}
|
||||
{% set access_secret = salt['pillar.get']('minio:access_secret', '') %}
|
||||
@@ -56,4 +60,12 @@ so-minio:
|
||||
- /opt/so/conf/minio/etc:/.minio:rw
|
||||
- /etc/pki/minio.key:/.minio/certs/private.key:ro
|
||||
- /etc/pki/minio.crt:/.minio/certs/public.crt:ro
|
||||
- entrypoint: "/usr/bin/docker-entrypoint.sh server --certs-dir /.minio/certs --address :9595 /data"
|
||||
- entrypoint: "/usr/bin/docker-entrypoint.sh server --certs-dir /.minio/certs --address :9595 /data"
|
||||
|
||||
{% else %}
|
||||
|
||||
minio_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: minio_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,5 +1,18 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'motd' in top_states %}
|
||||
|
||||
so_motd:
|
||||
file.managed:
|
||||
- name: /etc/motd
|
||||
- source: salt://motd/files/so_motd.jinja
|
||||
- template: jinja
|
||||
|
||||
{% else %}
|
||||
|
||||
motd_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: motd_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'mysql' in top_states %}
|
||||
|
||||
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %}
|
||||
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
@@ -93,3 +98,11 @@ so-mysql:
|
||||
- onchanges:
|
||||
- docker_container: so-mysql
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
mysql_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: mysql_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -2,6 +2,8 @@
|
||||
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
|
||||
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
|
||||
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
|
||||
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
|
||||
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
@@ -29,7 +31,7 @@ http {
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 1024M;
|
||||
client_max_body_size 2500M;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
@@ -65,6 +67,23 @@ http {
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
{%- if ISAIRGAP is sameas true %}
|
||||
server {
|
||||
listen 7788;
|
||||
server_name _;
|
||||
root /opt/socore/html/repo;
|
||||
location /rules/ {
|
||||
allow all;
|
||||
sendfile on;
|
||||
sendfile_max_chunk 1m;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_format html;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
}
|
||||
{%- endif %}
|
||||
|
||||
|
||||
{% if FLEET_MANAGER %}
|
||||
server {
|
||||
@@ -180,7 +199,20 @@ http {
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
}
|
||||
|
||||
{%- if ISAIRGAP is sameas true %}
|
||||
location /repo/ {
|
||||
allow all;
|
||||
sendfile on;
|
||||
sendfile_max_chunk 1m;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_format html;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
|
||||
{%- endif %}
|
||||
|
||||
location /grafana/ {
|
||||
auth_request /auth/sessions/whoami;
|
||||
|
||||
@@ -29,7 +29,7 @@ http {
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 1024M;
|
||||
client_max_body_size 2500M;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
|
||||
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
|
||||
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
|
||||
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
|
||||
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
@@ -29,7 +31,7 @@ http {
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 1024M;
|
||||
client_max_body_size 2500M;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
@@ -65,6 +67,22 @@ http {
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
{%- if ISAIRGAP is sameas true %}
|
||||
server {
|
||||
listen 7788;
|
||||
server_name _;
|
||||
root /opt/socore/html/repo;
|
||||
location /rules/ {
|
||||
allow all;
|
||||
sendfile on;
|
||||
sendfile_max_chunk 1m;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_format html;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
}
|
||||
{%- endif %}
|
||||
|
||||
{% if FLEET_MANAGER %}
|
||||
server {
|
||||
@@ -232,6 +250,19 @@ http {
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
{%- if ISAIRGAP is sameas true %}
|
||||
location /repo/ {
|
||||
allow all;
|
||||
sendfile on;
|
||||
sendfile_max_chunk 1m;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_format html;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
|
||||
{%- endif %}
|
||||
|
||||
{%- if FLEET_NODE %}
|
||||
location /fleet/ {
|
||||
return 301 https://{{ FLEET_IP }}/fleet;
|
||||
|
||||
@@ -2,6 +2,8 @@
|
||||
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
|
||||
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
|
||||
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
|
||||
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
|
||||
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
@@ -29,7 +31,7 @@ http {
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 1024M;
|
||||
client_max_body_size 2500M;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
@@ -65,6 +67,23 @@ http {
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
{%- if ISAIRGAP is sameas true %}
|
||||
server {
|
||||
listen 7788;
|
||||
server_name _;
|
||||
root /opt/socore/html/repo;
|
||||
location /rules/ {
|
||||
allow all;
|
||||
sendfile on;
|
||||
sendfile_max_chunk 1m;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_format html;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
}
|
||||
{%- endif %}
|
||||
|
||||
|
||||
{% if FLEET_MANAGER %}
|
||||
server {
|
||||
@@ -180,6 +199,19 @@ http {
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
{%- if ISAIRGAP is sameas true %}
|
||||
location /repo/ {
|
||||
allow all;
|
||||
sendfile on;
|
||||
sendfile_max_chunk 1m;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_format html;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
|
||||
{%- endif %}
|
||||
|
||||
location /grafana/ {
|
||||
auth_request /auth/sessions/whoami;
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
{%- set FLEET_MANAGER = salt['pillar.get']('global:fleet_manager') %}
|
||||
{%- set FLEET_NODE = salt['pillar.get']('global:fleet_node') %}
|
||||
{%- set FLEET_IP = salt['pillar.get']('global:fleet_ip', None) %}
|
||||
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
@@ -29,7 +30,7 @@ http {
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 1024M;
|
||||
client_max_body_size 2500M;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
@@ -65,6 +66,23 @@ http {
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
{%- if ISAIRGAP is sameas true %}
|
||||
server {
|
||||
listen 7788;
|
||||
server_name _;
|
||||
root /opt/socore/html/repo;
|
||||
location /rules/ {
|
||||
allow all;
|
||||
sendfile on;
|
||||
sendfile_max_chunk 1m;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_format html;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
}
|
||||
{%- endif %}
|
||||
|
||||
|
||||
{% if FLEET_MANAGER %}
|
||||
server {
|
||||
@@ -182,6 +200,20 @@ http {
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
{%- if ISAIRGAP is sameas true %}
|
||||
location /repo/ {
|
||||
allow all;
|
||||
sendfile on;
|
||||
sendfile_max_chunk 1m;
|
||||
autoindex on;
|
||||
autoindex_exact_size off;
|
||||
autoindex_format html;
|
||||
autoindex_localtime on;
|
||||
}
|
||||
|
||||
{%- endif %}
|
||||
|
||||
|
||||
location /grafana/ {
|
||||
auth_request /auth/sessions/whoami;
|
||||
rewrite /grafana/(.*) /$1 break;
|
||||
|
||||
@@ -1,8 +1,14 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'nginx' in top_states %}
|
||||
|
||||
{% set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %}
|
||||
{% set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set ISAIRGAP = salt['pillar.get']('global:airgap') %}
|
||||
|
||||
# Drop the correct nginx config based on role
|
||||
nginxconfdir:
|
||||
@@ -72,6 +78,9 @@ so-nginx:
|
||||
- /etc/pki/managerssl.crt:/etc/pki/nginx/server.crt:ro
|
||||
- /etc/pki/managerssl.key:/etc/pki/nginx/server.key:ro
|
||||
- /opt/so/conf/fleet/packages:/opt/socore/html/packages
|
||||
{% if ISAIRGAP is sameas true %}
|
||||
- /nsm/repo:/opt/socore/html/repo:ro
|
||||
{% endif %}
|
||||
# ATT&CK Navigator binds
|
||||
- /opt/so/conf/navigator/navigator_config.json:/opt/socore/html/navigator/assets/config.json:ro
|
||||
- /opt/so/conf/navigator/nav_layer_playbook.json:/opt/socore/html/navigator/assets/playbook.json:ro
|
||||
@@ -79,9 +88,20 @@ so-nginx:
|
||||
- port_bindings:
|
||||
- 80:80
|
||||
- 443:443
|
||||
{% if ISAIRGAP is sameas true %}
|
||||
- 7788:7788
|
||||
{% endif %}
|
||||
{%- if FLEETMANAGER or FLEETNODE %}
|
||||
- 8090:8090
|
||||
{%- endif %}
|
||||
- watch:
|
||||
- file: nginxconf
|
||||
- file: nginxconfdir
|
||||
|
||||
{% else %}
|
||||
|
||||
nginx_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: nginx_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -12,6 +12,10 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'nodered' in top_states %}
|
||||
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
|
||||
@@ -75,3 +79,10 @@ so-nodered-flows:
|
||||
- name: /usr/sbin/so-nodered-load-flows
|
||||
- cwd: /
|
||||
|
||||
{% else %}
|
||||
|
||||
nodered_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: nodered_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -3,7 +3,7 @@
|
||||
{%- set CHECKININTERVALMS = salt['pillar.get']('pcap:sensor_checkin_interval_ms', 10000) -%}
|
||||
{
|
||||
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
|
||||
"logLevel":"debug",
|
||||
"logLevel":"info",
|
||||
"agent": {
|
||||
"pollIntervalMs": {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }},
|
||||
"serverUrl": "https://{{ MANAGER }}/sensoroniagents",
|
||||
|
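
The pollIntervalMs expression above falls back to 10000 ms whenever the pcap:sensor_checkin_interval_ms pillar is unset or falsy. A minimal sketch of that fallback, for illustration only:

```
# Mirrors the Jinja expression {{ CHECKININTERVALMS if CHECKININTERVALMS else 10000 }}.
def poll_interval_ms(checkin_interval_ms=None):
    return checkin_interval_ms if checkin_interval_ms else 10000

assert poll_interval_ms() == 10000      # pillar missing -> default
assert poll_interval_ms(0) == 10000     # falsy value -> default
assert poll_interval_ms(5000) == 5000   # explicit pillar value wins
```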
||||
@@ -12,6 +12,11 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'pcap' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
@@ -160,3 +165,11 @@ so-sensoroni:
|
||||
- /opt/so/log/sensoroni:/opt/sensoroni/logs:rw
|
||||
- watch:
|
||||
- file: /opt/so/conf/sensoroni/sensoroni.json
|
||||
|
||||
{% else %}
|
||||
|
||||
pcap_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: pcap_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'playbook' in top_states %}
|
||||
|
||||
{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
@@ -96,4 +101,12 @@ so-playbookruleupdatecron:
|
||||
- name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
|
||||
- user: root
|
||||
- minute: '1'
|
||||
- hour: '6'
|
||||
- hour: '6'
|
||||
|
||||
{% else %}
|
||||
|
||||
playbook_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: playbook_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -12,6 +12,11 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'redis' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
@@ -64,3 +69,11 @@ so-redis:
|
||||
- entrypoint: "redis-server /usr/local/etc/redis/redis.conf"
|
||||
- watch:
|
||||
- file: /opt/so/conf/redis/etc
|
||||
|
||||
{% else %}
|
||||
|
||||
redis_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: redis_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'registry' in top_states %}
|
||||
|
||||
# Create the config directory for the docker registry
|
||||
dockerregistryconfdir:
|
||||
file.directory:
|
||||
@@ -51,3 +56,11 @@ so-dockerregistry:
|
||||
- /nsm/docker-registry/docker:/var/lib/registry/docker:rw
|
||||
- /etc/pki/registry.crt:/etc/pki/registry.crt:ro
|
||||
- /etc/pki/registry.key:/etc/pki/registry.key:ro
|
||||
|
||||
{% else %}
|
||||
|
||||
registry_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: registry_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
salt/salt/engines/checkmine.py (new file, 28 additions)

@@ -0,0 +1,28 @@
# -*- coding: utf-8 -*-

import logging
from time import sleep
from os import remove

log = logging.getLogger(__name__)

def start(interval=30):
    log.info("checkmine engine started")
    minionid = __grains__['id']
    while True:
        try:
            ca_crt = __salt__['saltutil.runner']('mine.get', tgt=minionid, fun='x509.get_pem_entries')[minionid]['/etc/pki/ca.crt']
            log.info('Successfully queried Salt mine for the CA.')
        except:
            log.error('Could not pull CA from the Salt mine.')
            log.info('Removing /var/cache/salt/master/minions/%s/mine.p to force Salt mine to be repopulated.' % minionid)
            try:
                remove('/var/cache/salt/master/minions/%s/mine.p' % minionid)
                log.info('Removed /var/cache/salt/master/minions/%s/mine.p' % minionid)
            except FileNotFoundError:
                log.error('/var/cache/salt/master/minions/%s/mine.p does not exist' % minionid)

            __salt__['mine.send'](name='x509.get_pem_entries', glob_path='/etc/pki/ca.crt')
            log.warning('Salt mine repopulated with /etc/pki/ca.crt')

        sleep(interval)
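
For readers unfamiliar with the Salt mine, the engine above boils down to a single lookup: does the mine hold this minion's /etc/pki/ca.crt entry? If the lookup fails, the engine deletes the cached mine.p file and calls mine.send to repopulate it. The standalone sketch below (a hypothetical helper, not part of the commit) shows that check in isolation; mine_data stands in for the structure returned by mine.get.

```
# Illustrative only: the core check performed by checkmine.py, as a pure function.
def mine_has_ca(mine_data, minion_id, ca_path='/etc/pki/ca.crt'):
    """Return True if the Salt mine data holds a CA entry for this minion."""
    try:
        return bool(mine_data[minion_id][ca_path])
    except (KeyError, TypeError):
        return False

# Example: a healthy mine entry vs. a missing/corrupted one.
print(mine_has_ca({'so-manager': {'/etc/pki/ca.crt': '-----BEGIN CERTIFICATE-----...'}}, 'so-manager'))  # True
print(mine_has_ca({}, 'so-manager'))                                                                     # False
```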
salt/salt/files/engines.conf (new file, 6 additions)

@@ -0,0 +1,6 @@
engines_dirs:
  - /etc/salt/engines

engines:
  - checkmine:
      interval: 30
@@ -3,12 +3,12 @@
|
||||
|
||||
{% if grains.os|lower == 'ubuntu' %}
|
||||
{% set COMMON = 'salt-common' %}
|
||||
{% elif grains.os|lower == 'centos' %}
|
||||
{% elif grains.os|lower in ['centos', 'redhat'] %}
|
||||
{% set COMMON = 'salt' %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.saltversion|string != SALTVERSION|string %}
|
||||
{% if grains.os|lower == 'centos' %}
|
||||
{% if grains.os|lower in ['centos', 'redhat'] %}
|
||||
{% set UPGRADECOMMAND = 'yum versionlock delete "salt-*" && sh /usr/sbin/bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %}
|
||||
{% elif grains.os|lower == 'ubuntu' %}
|
||||
{% set UPGRADECOMMAND = 'apt-mark unhold salt-common && apt-mark unhold salt-minion && sh /usr/sbin/bootstrap-salt.sh -F -x python3 stable ' ~ SALTVERSION %}
|
||||
|
||||
@@ -1,3 +1,11 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'salt.master' in top_states %}
|
||||
|
||||
include:
|
||||
- salt.minion
|
||||
|
||||
salt_master_package:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
@@ -8,4 +16,27 @@ salt_master_package:
|
||||
salt_master_service:
|
||||
service.running:
|
||||
- name: salt-master
|
||||
- enable: True
|
||||
- enable: True
|
||||
|
||||
checkmine_engine:
|
||||
file.managed:
|
||||
- name: /etc/salt/engines/checkmine.py
|
||||
- source: salt://salt/engines/checkmine.py
|
||||
- makedirs: True
|
||||
- watch_in:
|
||||
- service: salt_minion_service
|
||||
|
||||
engines_config:
|
||||
file.managed:
|
||||
- name: /etc/salt/minion.d/engines.conf
|
||||
- source: salt://salt/files/engines.conf
|
||||
- watch_in:
|
||||
- service: salt_minion_service
|
||||
|
||||
{% else %}
|
||||
|
||||
salt_master_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: salt_master_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -6,52 +6,39 @@ selfservice:
|
||||
password:
|
||||
enabled: true
|
||||
|
||||
settings:
|
||||
privileged_session_max_age: 1m
|
||||
after:
|
||||
profile:
|
||||
hooks:
|
||||
- hook: verify
|
||||
|
||||
verify:
|
||||
return_to: https://{{ WEBACCESS }}/
|
||||
flows:
|
||||
settings:
|
||||
ui_url: https://{{ WEBACCESS }}/?r=/settings
|
||||
|
||||
logout:
|
||||
redirect_to: https://{{ WEBACCESS }}/login/
|
||||
verification:
|
||||
ui_url: https://{{ WEBACCESS }}/
|
||||
|
||||
login:
|
||||
request_lifespan: 10m
|
||||
login:
|
||||
ui_url: https://{{ WEBACCESS }}/login/
|
||||
|
||||
registration:
|
||||
request_lifespan: 10m
|
||||
after:
|
||||
password:
|
||||
hooks:
|
||||
- hook: session
|
||||
- hook: verify
|
||||
error:
|
||||
ui_url: https://{{ WEBACCESS }}/login/
|
||||
|
||||
registration:
|
||||
ui_url: https://{{ WEBACCESS }}/login/
|
||||
|
||||
default_browser_return_url: https://{{ WEBACCESS }}/
|
||||
whitelisted_return_urls:
|
||||
- http://127.0.0.1
|
||||
|
||||
log:
|
||||
level: debug
|
||||
format: json
|
||||
|
||||
secrets:
|
||||
session:
|
||||
default:
|
||||
- {{ KRATOSKEY }}
|
||||
|
||||
urls:
|
||||
login_ui: https://{{ WEBACCESS }}/login/
|
||||
registration_ui: https://{{ WEBACCESS }}/login/
|
||||
error_ui: https://{{ WEBACCESS }}/login/
|
||||
settings_ui: https://{{ WEBACCESS }}/?r=/settings
|
||||
verify_ui: https://{{ WEBACCESS }}/
|
||||
mfa_ui: https://{{ WEBACCESS }}/
|
||||
|
||||
self:
|
||||
public: https://{{ WEBACCESS }}/auth/
|
||||
admin: https://{{ WEBACCESS }}/kratos/
|
||||
default_return_to: https://{{ WEBACCESS }}/
|
||||
whitelisted_return_to_urls:
|
||||
- http://127.0.0.1
|
||||
serve:
|
||||
public:
|
||||
base_url: https://{{ WEBACCESS }}/auth/
|
||||
admin:
|
||||
base_url: https://{{ WEBACCESS }}/kratos/
|
||||
|
||||
hashers:
|
||||
argon2:
|
||||
@@ -62,8 +49,7 @@ hashers:
|
||||
key_length: 32
|
||||
|
||||
identity:
|
||||
traits:
|
||||
default_schema_url: file:///kratos-conf/schema.json
|
||||
default_schema_url: file:///kratos-conf/schema.json
|
||||
|
||||
courier:
|
||||
smtp:
|
||||
|
||||
@@ -4,37 +4,46 @@
|
||||
"title": "Person",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"email": {
|
||||
"type": "string",
|
||||
"format": "email",
|
||||
"title": "E-Mail",
|
||||
"minLength": 6,
|
||||
"ory.sh/kratos": {
|
||||
"credentials": {
|
||||
"password": {
|
||||
"identifier": true
|
||||
"traits": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"email": {
|
||||
"type": "string",
|
||||
"format": "email",
|
||||
"title": "E-Mail",
|
||||
"minLength": 6,
|
||||
"ory.sh/kratos": {
|
||||
"credentials": {
|
||||
"password": {
|
||||
"identifier": true
|
||||
}
|
||||
},
|
||||
"verification": {
|
||||
"via": "email"
|
||||
}
|
||||
},
|
||||
"verification": {
|
||||
"via": "email"
|
||||
}
|
||||
}
|
||||
},
|
||||
"firstName": {
|
||||
"type": "string",
|
||||
"title": "First Name"
|
||||
},
|
||||
"lastName": {
|
||||
"type": "string",
|
||||
"title": "Last Name"
|
||||
},
|
||||
"role": {
|
||||
"type": "string",
|
||||
"title": "Role"
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"title": "Status"
|
||||
}
|
||||
},
|
||||
"firstName": {
|
||||
"type": "string",
|
||||
"title": "First Name"
|
||||
},
|
||||
"lastName": {
|
||||
"type": "string",
|
||||
"title": "Last Name"
|
||||
},
|
||||
"role": {
|
||||
"type": "string",
|
||||
"title": "Role"
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"email"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
"required": [
|
||||
"email"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,21 +1,28 @@
|
||||
{
|
||||
"title": "Security Onion 2.1.0 RC2 is here!",
|
||||
"title": "Security Onion 2.2.0 RC3 is here!",
|
||||
"changes": [
|
||||
{ "summary": "Known Issues <ul><li>Once you update your grid to RC2, any new nodes that join the grid must be RC2 so if you try to join a new RC1 node it will fail. For best results, use the latest RC2 ISO (or RC2 installer from github) when joining to an RC2 grid.</li><li>Shipping Windows Eventlogs with Osquery will fail intermittently with utf8 errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5.</li><li>When running soup to upgrade from RC1 to RC2, there is a Salt error that occurs during the final highstate. This error is related to the patch_os_schedule and can be ignored as it will not occur again in subsequent highstates.</li><li>When Search Nodes are upgraded from RC1 to RC2, there is a chance of a race condition where certificates are missing. This will show errors in the manager log to the remote node. To fix this run the following on the search node that is having the issue:<ol><li>Stop elasticsearch - <i>sudo so-elasticsearch-stop</i></li><li>Run the SSL state - <i>sudo salt-call state.apply ssl</i></li><li>Restart elasticsearch - <i>sudo so-elasticsearch-restart</i></li></ol></li></ul>" },
|
||||
{ "summary": "Fixed an issue where the console was timing out and making it appear that the installer was hung." },
|
||||
{ "summary": "Introduced <i>Import</i> node, which is ideal for running so-import-pcap to import pcap files and view the resulting logs in Hunt or Kibana." },
|
||||
{ "summary": "Suricata stats.log now rotates once a day. If you have a bunch of suriloss defunct processes on nodes that have it, do the following:<ul><li>Stop suricata - <i>sudo so-suricata-stop</i></li><li>Remove the current stats.log - <i>sudo rm /opt/so/log/suricata/stats.log</i></li><li>Reboot the machine - <i>shutdown -r now</i></li></ul>" },
|
||||
{ "summary": "Moved static.sls to global.sls to align the name with the functionality." },
|
||||
{ "summary": "Traffic between nodes in a distributed deployment is now fully encrypted." },
|
||||
{ "summary": "Playbook<ul><li>Elastalert now runs active Plays every 3 minutes</li><li>Changed default rule-update config to only import Windows rules from the Sigma Community repo</li><li>Lots of bug fixes & stability improvements</li></ul>" },
|
||||
{ "summary": "Ingest Node parsing updates for Osquery and Winlogbeat - implemented single pipeline for Windows eventlogs & sysmon logs" },
|
||||
{ "summary": "Upgraded Osquery to 4.4 and re-enabled auto-updates." },
|
||||
{ "summary": "Upgraded to Salt 3001.1" },
|
||||
{ "summary": "Upgraded Wazuh to 3.13.1" },
|
||||
{ "summary": "Hunt interface now shows the timezone being used for the selected date range." },
|
||||
{ "summary": "Fixed Cortex initialization so that TheHive integration and initial user set is correctly configured." },
|
||||
{ "summary": "Improved management of TheHive/Cortex credentials." },
|
||||
{ "summary": "SOC now allows for arbitrary, time-bounded PCAP job creation, with optional filtering by host and port." },
|
||||
{ "summary": "Historical release notes can be found on our docs website: <a href='https://docs.securityonion.net/en/2.1/release-notes.html'>https://docs.securityonion.net/en/2.1/release-notes.html</a>" }
|
||||
{ "summary": "Known Issues <ul><li>Installing in VMware Fusion using Fusion's internal DNS server may result in Setup incorrectly claiming that the installation failed. To avoid this, configure the VM to bypass Fusion's internal DNS server and go directly to an upstream DNS server instead. <a href=https://github.com/Security-Onion-Solutions/securityonion/issues/1333>https://github.com/Security-Onion-Solutions/securityonion/issues/1333</a></li><li>Once you update your grid to RC3, any new nodes that join the grid must be RC3 so if you try to join a new RC1 node it will fail. For best results, use the latest RC3 ISO (or RC3 installer from github) when joining to an RC3 grid.</li><li>Shipping Windows Eventlogs with Osquery will fail intermittently with utf8 errors logged in the Application log. This is scheduled to be fixed in Osquery 4.5.</li><li>When running soup to upgrade from RC1/RC2 to RC3, there is a Salt error that occurs during the final highstate. This error is related to the patch_os_schedule and can be ignored as it will not occur again in subsequent highstates.</li><li>When Search Nodes are upgraded from RC1 to RC3, there is a chance of a race condition where certificates are missing. This will show errors in the manager log to the remote node. To fix this run the following on the search node that is having the issue:<ol><li>Stop elasticsearch - <i>sudo so-elasticsearch-stop</i></li><li>Run the SSL state - <i>sudo salt-call state.apply ssl</i></li><li>Restart elasticsearch - <i>sudo so-elasticsearch-restart</i></li></ol></li></ul>" },
|
||||
{ "summary": "Setup now includes an option for airgap installations" },
|
||||
{ "summary": "Playbook now works properly when installed in airgap mode" },
|
||||
{ "summary": "Added so-analyst script to create an analyst workstation with GNOME desktop, Chromium browser, Wireshark, and NetworkMiner" },
|
||||
{ "summary": "Upgraded Zeek to version 3.0.10 to address a recent security issue" },
|
||||
{ "summary": "Upgraded Docker to latest version" },
|
||||
{ "summary": "Re-worked IDSTools to make it easier to modify" },
|
||||
{ "summary": "Added so-* tools to the default path so you can now tab complete" },
|
||||
{ "summary": "so-status can now be run from a manager node to get the status of a remote node. Run salt <target> so.status" },
|
||||
{ "summary": "Salt now prevents states from running on a node that it shouldn't so you can't, for example, accidentally apply the elasticsearch state on a forward node" },
|
||||
{ "summary": "Added logic to check for Salt mine corruption and recover automatically" },
|
||||
{ "summary": "Collapsed Hunt filter icons and action links into a new quick action bar that will appear when a field value is clicked; actions include:<ul><li>Filtering the hunt query</li><li>Pivot to PCAP</li><li>Create an alert in TheHive</li><li>Google search for the value</li><li>Analyze the value on VirusTotal.com</li></ul>" },
|
||||
{ "summary": "Fixed minor bugs in Hunt user interface relating to most-recently used queries, tooltips, and more" },
|
||||
{ "summary": "so-user-add now automatically adds users to Fleet and TheHive (in addition to SOC)" },
|
||||
{ "summary": "Introduced so-user-disable and so-user-enable commands which allows administrators to lock out users that are no longer permitted to use Security Onion" },
|
||||
{ "summary": "Added icon to SOC Users list representing their active or locked out status" },
|
||||
{ "summary": "Removed User delete action from SOC interface in favor of disabling users for audit purposes" },
|
||||
{ "summary": "Prune old PCAP job data from sensors once the results are streamed back to the manager node" },
|
||||
{ "summary": "Hunt filtering to a specific value will search across all fields instead of only the field that was originally clicked" },
|
||||
{ "summary": "Limiting PCAP jobs to extract at most 2GB from a sensor to avoid users accidentally requesting unreasonably large PCAP via the web interface" },
|
||||
{ "summary": "so-test is back - run it to easily replay PCAPs and verify that all the components are working as expected (Requires Internet Access)" },
|
||||
{ "summary": "New Elasticsearch subfield (.security) based on the new community-driven analyzer from @neu5ron - <a href=https://github.com/neu5ron/es_stk>https://github.com/neu5ron/es_stk</a>" },
|
||||
{ "summary": "Playbook now uses the new .security subfield for case-insensitive wildcard searches" }
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,14 +63,14 @@
|
||||
"::socks": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "socks.name", "socks.request.host", "socks.request.port", "socks.status", "log.id.uid" ],
|
||||
"::software": ["soc_timestamp", "source.ip", "software.name", "software.type" ],
|
||||
"::ssh": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssh.version", "ssh.hassh_version", "ssh.direction", "ssh.client", "ssh.server", "log.id.uid" ],
|
||||
"::ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.cipher", "ssl.curve", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ],
|
||||
"::ssl": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "ssl.server_name", "ssl.certificate.subject", "ssl.validation_status", "ssl.version", "log.id.uid" ],
|
||||
"::syslog": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "syslog.facility", "network.protocol", "syslog.severity", "log.id.uid" ],
|
||||
"::tunnels": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "tunnel_type", "action", "log.id.uid" ],
|
||||
"::weird": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "weird.name", "log.id.uid" ],
|
||||
"::x509": ["soc_timestamp", "x509.certificate.subject", "x509.certificate.key.type", "x509.certificate.key.length", "x509.certificate.issuer", "log.id.id" ],
|
||||
":firewall:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "direction", "interface", "action", "reason" ],
|
||||
":osquery:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ],
|
||||
":ossec:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.name", "rule.level", "rule.category", "process.name", "user.name", "user.escalated", "location", "process.name" ],
|
||||
":ossec:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.name", "rule.level", "rule.category", "process.name", "user.name", "user.escalated", "location" ],
|
||||
":strelka:file": ["soc_timestamp", "scan.exiftool.OriginalFileName", "file.size", "hash.md5", "scan.exiftool.CompanyName", "scan.exiftool.Description", "scan.exiftool.Directory", "scan.exiftool.FileType", "scan.exiftool.FileOS", "log.id.fuid" ],
|
||||
":suricata:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "rule.gid", "rule.name", "rule.category", "rule.rev", "event.severity", "event.severity_label" ],
|
||||
":sysmon:": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "source.hostname", "event.dataset", "process.executable", "user.name" ],
|
||||
@@ -84,8 +84,8 @@
|
||||
{ "name": "NIDS Alerts", "description": "Show all NIDS alerts grouped by alert name", "query": "event.category: network AND event.dataset: alert | groupby rule.name"},
|
||||
{ "name": "Wazuh/OSSEC Alerts", "description": "Show all Wazuh alerts grouped by category", "query": "event.module:ossec AND event.dataset:alert | groupby rule.category"},
|
||||
{ "name": "Wazuh/OSSEC Commands", "description": "Show all Wazuh alerts grouped by command line", "query": "event.module:ossec AND event.dataset:alert | groupby process.command_line"},
|
||||
{ "name": "Wazuh/OSSEC Processes", "description": "Show all Wazuh alerts grouped by process name", "query": "event.module:ossec AND event.dataset:alert | groupby process.name"},
|
||||
{ "name": "Wazuh/OSSEC Users", "description": "Show all Wazuh alerts grouped by username", "query": "event.module:ossec AND event.dataset:alert | groupby user.name"},
|
||||
{ "name": "Wazuh/OSSEC Processes", "description": "Show all Wazuh alerts grouped by process name", "query": "event.module:ossec AND event.dataset:alert | groupby process.name.keyword"},
|
||||
{ "name": "Wazuh/OSSEC Users", "description": "Show all Wazuh alerts grouped by username", "query": "event.module:ossec AND event.dataset:alert | groupby user.escalated.keyword"},
|
||||
{ "name": "Sysmon Events", "description": "Show all Sysmon logs grouped by event type", "query": "event.module:sysmon | groupby event.dataset"},
|
||||
{ "name": "Sysmon Usernames", "description": "Show all Sysmon logs grouped by username", "query": "event.module:sysmon | groupby event.dataset, user.name.keyword"},
|
||||
{ "name": "Zeek Notice", "description": "Show notices from Zeek", "query": "event.dataset:notice | groupby notice.note notice.message"},
|
||||
@@ -114,7 +114,7 @@
|
||||
{ "name": "HTTP", "description": "HTTP grouped by user agent", "query": "event.dataset:http | groupby http.useragent"},
|
||||
{ "name": "HTTP", "description": "HTTP grouped by virtual host", "query": "event.dataset:http | groupby http.virtual_host"},
|
||||
{ "name": "HTTP", "description": "HTTP with exe downloads", "query": "event.dataset:http AND file.resp_mime_types:dosexec | groupby http.virtual_host"},
|
||||
{ "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator"},
|
||||
{ "name": "Intel", "description": "Intel framework hits grouped by indicator", "query": "event.dataset:intel | groupby intel.indicator.keyword"},
|
||||
{ "name": "IRC", "description": "IRC grouped by command", "query": "event.dataset:irc | groupby irc.command.type"},
|
||||
{ "name": "KERBEROS", "description": "KERBEROS grouped by service", "query": "event.dataset:kerberos | groupby kerberos.service"},
|
||||
{ "name": "MODBUS", "description": "MODBUS grouped by function", "query": "event.dataset:modbus | groupby modbus.function"},
|
||||
@@ -124,7 +124,7 @@
|
||||
{ "name": "PE", "description": "PE files list", "query": "event.dataset:pe | groupby file.machine file.os file.subsystem"},
|
||||
{ "name": "RADIUS", "description": "RADIUS grouped by username", "query": "event.dataset:radius | groupby user.name.keyword"},
|
||||
{ "name": "RDP", "description": "RDP grouped by client name", "query": "event.dataset:rdp | groupby client.name"},
|
||||
{ "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.dataset:rfb | groupby rfb.desktop.name"},
|
||||
{ "name": "RFB", "description": "RFB grouped by desktop name", "query": "event.dataset:rfb | groupby rfb.desktop.name.keyword"},
|
||||
{ "name": "Signatures", "description": "Zeek signatures grouped by signature id", "query": "event.dataset:signatures | groupby signature_id"},
|
||||
{ "name": "SIP", "description": "SIP grouped by user agent", "query": "event.dataset:sip | groupby client.user_agent"},
|
||||
{ "name": "SMB_Files", "description": "SMB files grouped by action", "query": "event.dataset:smb_files | groupby file.action"},
|
||||
@@ -135,12 +135,18 @@
|
||||
{ "name": "SSH", "description": "SSH grouped by version", "query": "event.dataset:ssh | groupby ssh.version"},
|
||||
{ "name": "SSL", "description": "SSL grouped by version and server name", "query": "event.dataset:ssl | groupby ssl.version ssl.server_name"},
|
||||
{ "name": "SYSLOG", "description": "SYSLOG grouped by severity and facility ", "query": "event.dataset:syslog | groupby syslog.severity syslog.facility"},
|
||||
{ "name": "Tunnels", "description": "Tunnels grouped by action", "query": "event.dataset:tunnels | groupby event.action"},
|
||||
{ "name": "Tunnel", "description": "Tunnels grouped by action", "query": "event.dataset:tunnel | groupby event.action"},
|
||||
{ "name": "Weird", "description": "Zeek weird log grouped by name", "query": "event.dataset:weird | groupby weird.name"},
|
||||
{ "name": "x509", "description": "x.509 grouped by key length", "query": "event.dataset:x509 | groupby x509.certificate.key.length"},
|
||||
{ "name": "x509", "description": "x.509 grouped by issuer", "query": "event.dataset:x509 | groupby x509.certificate.issuer"},
|
||||
{ "name": "x509", "description": "x.509 grouped by subject", "query": "event.dataset:x509 | groupby x509.certificate.subject"},
|
||||
{ "name": "Firewall", "description": "Firewall events grouped by action", "query": "event_type:firewall | groupby action"}
|
||||
],
|
||||
"actions": [
|
||||
{ "name": "", "description": "actionPcapHelp", "icon": "fa-stream", "link": "/joblookup?esid={eventId}", "target": "" },
|
||||
{ "name": "", "description": "actionAlertHelp", "icon": "fa-bell", "link": "/soctopus/thehive/alert/{eventId}", "target": "_blank" },
|
||||
{ "name": "", "description": "actionGoogleHelp", "icon": "fab fa-google", "link": "https://www.google.com/search?q={value}", "target": "_blank" },
|
||||
{ "name": "actionVirusTotal", "description": "actionVirusTotalHelp", "icon": "", "link": "https://www.virustotal.com/gui/search/{value}", "target": "_blank" }
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'soc' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
@@ -98,3 +103,11 @@ so-kratos:
|
||||
- 0.0.0.0:4434:4434
|
||||
- watch:
|
||||
- file: /opt/so/conf/kratos
|
||||
|
||||
{% else %}
|
||||
|
||||
soc_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: soc_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -65,6 +65,7 @@ playbook_ext_url = https://{{MANAGER}}/playbook
|
||||
playbook_key = de6639318502476f2fa5aa06f43f51fb389a3d7f
|
||||
playbook_verifycert = no
|
||||
playbook_unit_test_index = playbook-testing
|
||||
playbook_rulesets = windows
|
||||
|
||||
[log]
|
||||
logfile = /var/log/SOCtopus/soctopus.log
|
||||
|
||||
@@ -1,8 +1,14 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'soctopus' in top_states %}
|
||||
|
||||
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
{%- set MANAGER_URL = salt['pillar.get']('global:url_base', '') %}
|
||||
{%- set MANAGER_IP = salt['pillar.get']('global:managerip', '') %}
|
||||
{% set MANAGER_URL = salt['pillar.get']('global:url_base', '') %}
|
||||
{% set MANAGER_IP = salt['pillar.get']('global:managerip', '') %}
|
||||
{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
|
||||
|
||||
soctopusdir:
|
||||
file.directory:
|
||||
@@ -59,7 +65,18 @@ so-soctopus:
|
||||
- /opt/so/log/soctopus/:/var/log/SOCtopus/:rw
|
||||
- /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw
|
||||
- /opt/so/conf/navigator/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
|
||||
{% if ISAIRGAP is sameas true %}
|
||||
- /nsm/repo/rules/sigma:/soctopus/sigma
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
- 0.0.0.0:7000:7000
|
||||
- extra_hosts:
|
||||
- {{MANAGER_URL}}:{{MANAGER_IP}}
|
||||
|
||||
{% else %}
|
||||
|
||||
soctopus_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: soctopus_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,3 +1,8 @@
|
||||
{% set show_top = salt['state.show_top']() %}
|
||||
{% set top_states = show_top.values() | join(', ') %}
|
||||
|
||||
{% if 'ssl' in top_states %}
|
||||
|
||||
{% set manager = salt['grains.get']('master') %}
|
||||
{% set managerip = salt['pillar.get']('global:managerip', '') %}
|
||||
{% set HOSTNAME = salt['grains.get']('host') %}
|
||||
@@ -8,10 +13,10 @@
|
||||
{% set CUSTOM_FLEET_HOSTNAME = salt['pillar.get']('global:fleet_custom_hostname', None) %}
|
||||
|
||||
{% if grains.id.split('_')|last in ['manager', 'eval', 'standalone', 'import'] %}
|
||||
{% set trusttheca_text = salt['mine.get'](grains.id, 'x509.get_pem_entries')[grains.id]['/etc/pki/ca.crt']|replace('\n', '') %}
|
||||
{% set trusttheca_text = salt['cp.get_file_str']('/etc/pki/ca.crt')|replace('\n', '') %}
|
||||
{% set ca_server = grains.id %}
|
||||
{% else %}
|
||||
{% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
|
||||
{% set x509dict = salt['mine.get']('*', 'x509.get_pem_entries') %}
|
||||
{% for host in x509dict %}
|
||||
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
|
||||
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
|
||||
@@ -570,3 +575,11 @@ elastickeyperms:
|
||||
- group: 930
|
||||
|
||||
{%- endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
ssl_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: ssl_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
Some files were not shown because too many files have changed in this diff.