Merge remote-tracking branch 'upstream/master' into fleet-fixes

This commit is contained in:
Josh Brower
2019-11-26 07:01:08 -05:00
38 changed files with 3100 additions and 200 deletions

View File

@@ -0,0 +1,24 @@
from os import path
import subprocess


def check():
    """Report whether this minion needs a reboot after package updates.

    Intended to run as a Salt execution/mine function: it reads the OS
    name from the injected ``__grains__`` dictionary.

    Returns:
        str: ``'True'`` if a reboot is required, ``'False'`` if not, or
        ``'Unsupported OS: <name>'`` for any OS other than Ubuntu or
        CentOS. Returning strings (not booleans) is deliberate: the
        consuming motd template compares mine data against the string
        ``'True'``.
    """
    os_name = __grains__['os']
    if os_name == 'Ubuntu':
        # Ubuntu's update-notifier drops this flag file when a reboot
        # is required after package installation.
        return 'True' if path.exists('/var/run/reboot-required') else 'False'
    if os_name == 'CentOS':
        # needs-restarting -r (yum-utils) exits non-zero when a reboot
        # is required; use the return code directly instead of catching
        # CalledProcessError and discarding the result.
        rc = subprocess.call('needs-restarting -r > /dev/null 2>&1',
                             shell=True)
        return 'True' if rc != 0 else 'False'
    return 'Unsupported OS: %s' % os_name

View File

@@ -39,10 +39,10 @@ pki_private_key:
- require:
- file: /etc/pki
mine.send:
send_x509_pem_entries_to_mine:
module.run:
- func: x509.get_pem_entries
- kwargs:
glob_path: /etc/pki/ca.crt
- mine.send:
- func: x509.get_pem_entries
- glob_path: /etc/pki/ca.crt
- onchanges:
- x509: /etc/pki/ca.crt

View File

@@ -141,6 +141,8 @@ so-core:
- watch:
- file: /opt/so/conf/nginx/nginx.conf
# If master or eval, install Grafana/Telegraf/Influx
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' and GRAFANA == 1 %}
# Add Telegraf to monitor all the things.
tgraflogdir:
file.directory:
@@ -213,9 +215,6 @@ so-telegraf:
- /opt/so/conf/telegraf/etc/telegraf.conf
- /opt/so/conf/telegraf/scripts
# If it's a master or eval, let's install the back end for now
{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' and GRAFANA == 1 %}
# Influx DB
influxconfdir:
file.directory:
@@ -316,7 +315,7 @@ grafanaconf:
- source: salt://common/grafana/etc
{% if salt['pillar.get']('mastertab', False) %}
{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).iteritems() %}
{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
dashboard-master:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
@@ -337,7 +336,7 @@ dashboard-master:
{% endif %}
{% if salt['pillar.get']('sensorstab', False) %}
{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).iteritems() %}
{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/forward_nodes/{{ SN }}-Sensor.json
@@ -358,7 +357,7 @@ dashboard-{{ SN }}:
{% endif %}
{% if salt['pillar.get']('nodestab', False) %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/storage_nodes/{{ SN }}-Node.json
@@ -379,7 +378,7 @@ dashboard-{{ SN }}:
{% endif %}
{% if salt['pillar.get']('evaltab', False) %}
{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).iteritems() %}
{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).items() %}
dashboard-{{ SN }}:
file.managed:
- name: /opt/so/conf/grafana/grafana_dashboards/eval/{{ SN }}-Node.json

View File

@@ -185,6 +185,18 @@ http {
proxy_set_header Proxy "";
}
location /cyberchef/ {
proxy_pass http://{{ masterip }}:9080/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
}
location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/;

View File

@@ -187,6 +187,18 @@ http {
proxy_set_header Proxy "";
}
location /cyberchef/ {
proxy_pass http://{{ masterip }}:9080/;
proxy_read_timeout 90;
proxy_connect_timeout 90;
proxy_http_version 1.1; # this is essential for chunked responses to work
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header Proxy "";
}
location /soctopus/ {
proxy_pass http://{{ masterip }}:7000/;

53
salt/cyberchef/init.sls Normal file
View File

@@ -0,0 +1,53 @@
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

# Create the cyberchef group
cyberchefgroup:
  group.present:
    - name: cyberchef
    - gid: 946

# Add the cyberchef user
cyberchef:
  user.present:
    - uid: 946
    - gid: 946
    - home: /opt/so/conf/cyberchef

# Config directory for the CyberChef container
cyberchefconfdir:
  file.directory:
    - name: /opt/so/conf/cyberchef
    - user: 946
    # NOTE(review): group 939 differs from the gid 946 used by the other
    # cyberchef states in this file -- confirm this is intentional.
    - group: 939
    - makedirs: True

# Log directory for the CyberChef container
cybercheflog:
  file.directory:
    - name: /opt/so/log/cyberchef
    - user: 946
    - group: 946
    - makedirs: True

# Pull the image with Docker Content Trust enabled
# (--disable-content-trust=false means trust checking is ON).
so-cyberchefimage:
  cmd.run:
    - name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-cyberchef:HH1.1.3

# Run the CyberChef container; container port 8080 is published on
# host port 9080 (proxied by nginx at /cyberchef/ elsewhere).
so-cyberchef:
  docker_container.running:
    - require:
      - so-cyberchefimage
    - image: docker.io/soshybridhunter/so-cyberchef:HH1.1.3
    - port_bindings:
      - 0.0.0.0:9080:8080

View File

@@ -8,6 +8,11 @@ rules_folder: /etc/elastalert/rules/
# the rules directory - true or false
scan_subdirectories: true
# Do not disable a rule when an uncaught exception is thrown -
# This setting should be tweaked once the following issue has been fixed
# https://github.com/Security-Onion-Solutions/securityonion-saltstack/issues/98
disable_rules_on_error: false
# How often ElastAlert will query Elasticsearch
# The unit can be anything from weeks to seconds
run_every:

View File

@@ -15,7 +15,7 @@ timeframe:
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
query_key: alert
query_key: ["alert", "ips"]
realert:
days: 1
@@ -36,11 +36,11 @@ hive_proxies:
hive_alert_config:
title: '{match[alert]}'
type: 'external'
type: 'NIDS'
source: 'SecurityOnion'
description: "`NIDS Dashboard:` \n\n <https://{{es}}/kibana/app/kibana#/dashboard/ed6f7e20-e060-11e9-8f0c-2ddbf5ed9290?_g=(refreshInterval:(display:Off,pause:!f,value:0),time:(from:now-24h,mode:quick,to:now))&_a=(columns:!(_source),index:'*:logstash-*',interval:auto,query:(query_string:(analyze_wildcard:!t,query:'sid:{match[sid]}')),sort:!('@timestamp',desc))> \n\n `IPs: `{match[source_ip]}:{match[source_port]} --> {match[destination_ip]}:{match[destination_port]} \n\n `Signature:` {match[rule_signature]}"
severity: 2
tags: ['elastalert', 'SecurityOnion', 'NIDS','{match[sid]}']
tags: ['{match[sid]}','{match[source_ip]}','{match[destination_ip]}']
tlp: 3
status: 'New'
follow: True

View File

@@ -276,6 +276,18 @@ enable_master_cortex_9001_{{ip}}:
- position: 1
- save: True
enable_master_cyberchef_9080_{{ip}}:
iptables.insert:
- table: filter
- chain: DOCKER-USER
- jump: ACCEPT
- proto: tcp
- source: {{ ip }}
- dport: 9080
- position: 1
- save: True
{% endfor %}
# Make it so all the minions can talk to salt and update etc.

View File

@@ -61,13 +61,13 @@ fleetdbpriv:
so-fleetimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-fleet:HH1.1.0
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-fleet:HH1.1.3
so-fleet:
docker_container.running:
- require:
- so-fleetimage
- image: docker.io/soshybridhunter/so-fleet:HH1.1.0
- image: docker.io/soshybridhunter/so-fleet:HH1.1.3
- hostname: so-fleet
- port_bindings:
- 0.0.0.0:8080:8080
@@ -83,6 +83,7 @@ so-fleet:
- KOLIDE_AUTH_JWT_KEY=thisisatest
- KOLIDE_OSQUERY_STATUS_LOG_FILE=/var/log/osquery/status.log
- KOLIDE_OSQUERY_RESULT_LOG_FILE=/var/log/osquery/result.log
- KOLIDE_SERVER_URL_PREFIX=/fleet
- binds:
- /etc/pki/fleet.key:/ssl/server.key:ro
- /etc/pki/fleet.crt:/ssl/server.cert:ro

View File

@@ -1,5 +1,5 @@
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.

View File

@@ -3,6 +3,9 @@
{%- set CORTEXUSER = salt['pillar.get']('static:cortexuser', '') %}
{%- set CORTEXPASSWORD = salt['pillar.get']('static:cortexpassword', '') %}
{%- set CORTEXKEY = salt['pillar.get']('static:cortexkey', '') %}
{%- set CORTEXORGNAME = salt['pillar.get']('static:cortexorgname', '') %}
{%- set CORTEXORGUSER = salt['pillar.get']('static:cortexorguser', '') %}
{%- set CORTEXORGUSERKEY = salt['pillar.get']('static:cortexorguserkey', '') %}
cortex_init(){
sleep 60
@@ -10,17 +13,34 @@ cortex_init(){
CORTEX_USER="{{CORTEXUSER}}"
CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
CORTEX_KEY="{{CORTEXKEY}}"
CORTEX_ORG_NAME="{{CORTEXORGNAME}}"
CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup"
CORTEX_ORG_USER="{{CORTEXORGUSER}}"
CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
# Migrate DB
curl -v -k -XPOST "https://$CORTEX_IP:/cortex/api/maintenance/migrate"
# Create initial Cortex user
curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"read\",\"analyze\",\"orgadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
# Create initial Cortex superadmin
curl -v -k "https://$CORTEX_IP/cortex/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
# Create user-supplied org
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
# Create user-supplied org user
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
# Enable URLScan.io Analyzer
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
# Enable Cert PassiveDNS Analyzer
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
# Revoke $CORTEX_USER key
curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" "https:///$CORTEX_IP/api/user/$CORTEX_USER/key"
# Update SOCtopus config with apikey value
#sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG

View File

@@ -13,7 +13,7 @@
#/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
#/usr/share/logstash/pipeline.so/0003_input_syslog.conf
#/usr/share/logstash/pipeline.so/0005_input_suricata.conf
/usr/share/logstash/pipeline.dynamic/0006_input_beats.conf
#/usr/share/logstash/pipeline.dynamic/0006_input_beats.conf
/usr/share/logstash/pipeline.so/0007_input_import.conf
/usr/share/logstash/pipeline.dynamic/0010_input_hhbeats.conf
#/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf

View File

@@ -9,23 +9,6 @@ input {
}
}
filter {
if [type] == "ids" or [type] =~ "bro" {
mutate {
rename => { "host" => "beat_host" }
remove_tag => ["beat"]
add_field => { "sensor_name" => "%{[beat][name]}" }
add_field => { "syslog-host_from" => "%{[beat][name]}" }
remove_field => [ "beat", "prospector", "input", "offset" ]
}
}
if [type] =~ "ossec" {
mutate {
rename => { "host" => "beat_host" }
remove_tag => ["beat"]
add_field => { "syslog-host_from" => "%{[beat][name]}" }
remove_field => [ "beat", "prospector", "input", "offset" ]
}
}
if [type] == "osquery" {
mutate {
rename => { "host" => "beat_host" }

View File

@@ -0,0 +1,48 @@
#!/bin/bash
# Mirror the trusted Security Onion docker images into the local
# registry on the master ($MASTER:5000) so grid nodes can pull from it.
# MASTER is substituted by the Salt/Jinja template at render time.
MASTER={{ MASTER }}
VERSION="HH1.1.3"

# Every container shipped with this release, pinned to $VERSION.
# NOTE(review): "so-curtor" looks like a typo for "so-curator" (the
# top.sls applies a "curator" state) -- confirm the image name against
# the soshybridhunter registry before changing it.
TRUSTED_CONTAINERS=( \
"so-core:$VERSION" \
"so-cyberchef:$VERSION" \
"so-acng:$VERSION" \
"so-sensoroni:$VERSION" \
"so-fleet:$VERSION" \
"so-soctopus:$VERSION" \
"so-steno:$VERSION" \
"so-playbook:$VERSION" \
"so-thehive-cortex:$VERSION" \
"so-thehive:$VERSION" \
"so-thehive-es:$VERSION" \
"so-wazuh:$VERSION" \
"so-kibana:$VERSION" \
"so-auth-ui:$VERSION" \
"so-auth-api:$VERSION" \
"so-elastalert:$VERSION" \
"so-navigator:$VERSION" \
"so-filebeat:$VERSION" \
"so-suricata:$VERSION" \
"so-logstash:$VERSION" \
"so-bro:$VERSION" \
"so-idstools:$VERSION" \
"so-fleet-launcher:$VERSION" \
"so-freqserver:$VERSION" \
"so-influxdb:$VERSION" \
"so-grafana:$VERSION" \
"so-telegraf:$VERSION" \
"so-redis:$VERSION" \
"so-mysql:$VERSION" \
"so-curtor:$VERSION" \
"so-elasticsearch:$VERSION" \
"so-domainstats:$VERSION" \
"so-tcpreplay:$VERSION" \
)

for i in "${TRUSTED_CONTAINERS[@]}"
do
  # Pull down the trusted docker image
  # (--disable-content-trust=false keeps content trust enabled)
  docker pull --disable-content-trust=false docker.io/soshybridhunter/$i
  # Tag it with the new registry destination
  docker tag soshybridhunter/$i $MASTER:5000/soshybridhunter/$i
  # Push the retagged image into the master's local registry
  docker push $MASTER:5000/soshybridhunter/$i
done

View File

@@ -17,6 +17,15 @@
{% if masterproxy == 1 %}
socore_own_saltstack:
file.directory:
- name: /opt/so/saltstack
- user: socore
- group: socore
- recurse:
- user
- group
# Create the directories for apt-cacher-ng
aptcacherconfdir:
file.directory:

View File

@@ -0,0 +1,23 @@
{#- Rendered into /etc/motd. Pulls each minion's needs_restarting.check
    result from the Salt mine; when any minion reports the string 'True',
    emits a banner listing those minions. Produces no output when the
    grid is fully patched. Whitespace-trimmed comment so rendered
    output is unchanged. -#}
{% set needs_restarting_check = salt['mine.get']('*', 'needs_restarting.check', tgt_type='glob') -%}
{%- if needs_restarting_check %}
{%- set minions_need_restarted = [] %}
{%- for minion, need_restarted in needs_restarting_check | dictsort() %}
{%- if need_restarted == 'True' %}
{% do minions_need_restarted.append(minion) %}
{%- endif %}
{%- endfor -%}
{%- if minions_need_restarted | length > 0 %}
*****************************************************************************************
* The following nodes in your Security Onion grid need restarted due to package updates *
*****************************************************************************************
{% for minion in minions_need_restarted -%}
{{ minion }}
{% endfor -%}
{%- endif -%}
{%- endif -%}

5
salt/motd/init.sls Normal file
View File

@@ -0,0 +1,5 @@
# Manage /etc/motd from the jinja template so logins show which grid
# nodes (if any) require a reboot after package updates.
package_update_reboot_required_motd:
  file.managed:
    - name: /etc/motd
    - source: salt://motd/files/package_update_reboot_required.jinja
    - template: jinja

View File

@@ -0,0 +1,5 @@
# Publish this minion's reboot-required status to the Salt mine by
# sending the result of needs_restarting.check. Ordered last so the
# value reflects any packages updated earlier in the same state run.
needs_restarting:
  module.run:
    - mine.send:
      - func: needs_restarting.check
    - order: last

10
salt/patch/os/init.sls Normal file
View File

@@ -0,0 +1,10 @@
include:
  # Refresh the mine's reboot-required status as part of this run.
  - patch.needs_restarting
{% if grains.os == "CentOS" %}
  # yum-utils provides the needs-restarting command used by the check.
  - yum.packages
{% endif %}

# Bring all installed OS packages up to date, refreshing repo metadata first.
patch_os:
  pkg.uptodate:
    - name: patch_os
    - refresh: True

View File

@@ -0,0 +1,76 @@
# Configure the Salt schedule that applies OS patches (state patch.os),
# driven by the patch:os pillar:
#   schedule_name: 'manual', 'auto', or the name of a YAML file under
#                  patch/os/schedules/ defining day/time slots
#   enabled:       when false, the schedule is disabled (not removed)
#   splay:         max random delay in seconds (default 300)
{% if salt['pillar.get']('patch:os:schedule_name') %}
{% set patch_os_pillar = salt['pillar.get']('patch:os') %}
{% set schedule_name = patch_os_pillar.schedule_name %}
{% set splay = patch_os_pillar.get('splay', 300) %}
{% if schedule_name != 'manual' and schedule_name != 'auto' %}
# Named schedule: load day/time slots from patch/os/schedules/<name>.yml
{% import_yaml "patch/os/schedules/"~schedule_name~".yml" as os_schedule %}
{% if patch_os_pillar.enabled %}
patch_os_schedule:
  schedule.present:
    - function: state.sls
    - job_args:
      - patch.os
    - when:
{% for days in os_schedule.patch.os.schedule %}
{% for day, times in days.items() %}
{% for time in times %}
      - {{day}} {{time}}
{% endfor %}
{% endfor %}
{% endfor %}
    - splay: {{splay}}
    - return_job: True
{% else %}
disable_patch_os_schedule:
  schedule.disabled:
    - name: patch_os_schedule
{% endif %}
{% elif schedule_name == 'auto' %}
# 'auto': run patch.os every 8 hours
{% if patch_os_pillar.enabled %}
patch_os_schedule:
  schedule.present:
    - function: state.sls
    - job_args:
      - patch.os
    - hours: 8
    - splay: {{splay}}
    - return_job: True
{% else %}
disable_patch_os_schedule:
  schedule.disabled:
    - name: patch_os_schedule
{% endif %}
{% elif schedule_name == 'manual' %}
# 'manual': remove any previously created schedule entirely
remove_patch_os_schedule:
  schedule.absent:
    - name: patch_os_schedule
{% endif %}
{% else %}
# No schedule_name pillar set: fail loudly and show the current
# schedule state so the operator can see what (if anything) is enabled.
no_patch_os_schedule_name_set:
  test.fail_without_changes:
    - name: "Set a pillar value for patch:os:schedule_name in this minion's .sls file. If an OS patch schedule is not listed as enabled in show_schedule output below, then OS patches will need to be applied manually until this is corrected."
show_patch_os_schedule:
  module.run:
    - schedule.is_enabled:
      - name: patch_os_schedule
{% endif %}

View File

@@ -0,0 +1,10 @@
# OS-patch schedule definition, referenced by patch:os:schedule_name
# and consumed by patch/os/schedule.sls. Each list item maps a day name
# to the 24h times at which patch.os should run. Times are quoted so
# YAML does not parse values like 15:00 as sexagesimal integers.
patch:
  os:
    schedule:
      - Tuesday:
          - '15:00'
      - Thursday:
          - '03:00'
      - Saturday:
          - '01:00'
          - '15:00'

View File

@@ -96,13 +96,13 @@ stenolog:
so-stenoimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-steno:HH1.1.1
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-steno:HH1.1.3
so-steno:
docker_container.running:
- require:
- so-stenoimage
- image: docker.io/soshybridhunter/so-steno:HH1.1.1
- image: docker.io/soshybridhunter/so-steno:HH1.1.3
- network_mode: host
- privileged: True
- port_bindings:

Binary file not shown.

View File

@@ -11,9 +11,9 @@ playbookdb:
playbookwebhook:
module.run:
- name: sqlite3.modify
- db: /opt/so/conf/playbook/redmine.db
- sql: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
- sqlite3.modify:
- db: /opt/so/conf/playbook/redmine.db
- sql: "update webhooks set url = 'http://{{MASTERIP}}:7000/playbook/webhook' where project_id = 1"
navigatorconfig:
file.managed:
@@ -26,13 +26,13 @@ navigatorconfig:
so-playbookimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-playbook:HH1.1.1
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-playbook:HH1.1.3
so-playbook:
docker_container.running:
- require:
- so-playbookimage
- image: docker.io/soshybridhunter/so-playbook:HH1.1.1
- image: docker.io/soshybridhunter/so-playbook:HH1.1.3
- hostname: playbook
- name: so-playbook
- binds:

View File

@@ -29,19 +29,19 @@ sensoronisync:
so-sensoroniimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-sensoroni:HH1.1.1
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-sensoroni:HH1.1.3
so-sensoroni:
docker_container.running:
- require:
- so-sensoroniimage
- image: docker.io/soshybridhunter/so-sensoroni:HH1.1.1
- image: docker.io/soshybridhunter/so-sensoroni:HH1.1.3
- hostname: sensoroni
- name: so-sensoroni
- binds:
- /nsm/sensoroni/jobs:/opt/sensoroni/jobs:rw
- /opt/so/conf/sensoroni/sensoroni.json:/opt/sensoroni/sensoroni.json:ro
- /opt/so/log/sensoroni/:/opt/sensoroni/log/:rw
- /opt/so/log/sensoroni/:/opt/sensoroni/logs/:rw
- port_bindings:
- 0.0.0.0:9822:9822
- watch:

View File

@@ -50,4 +50,4 @@ playbook_url = http://{{ip}}:3200/playbook
playbook_key = a4a34538782804adfcb8dfae96262514ad70c37c
[log]
logfile = /tmp/soctopus.log
logfile = /var/log/SOCtopus/soctopus.log

View File

@@ -1,23 +1,6 @@
{% set es = salt['pillar.get']('static:masterip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
es_host: {{es}}
es_port: 9200
name: Alert-Name
type: frequency
index: "*:logstash-*"
num_events: 1
timeframe:
minutes: 10
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
filter:
- query:
query_string:
query: 'select from test'
alert: modules.so.thehive.TheHiveAlerter
hive_connection:
@@ -30,11 +13,11 @@ hive_proxies:
hive_alert_config:
title: '{rule[name]}'
type: 'external'
type: 'playbook'
source: 'SecurityOnion'
description: '`Data:` {match[message]}'
description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` <https://{{es}}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{match[_id]}'),sort:!('@timestamp',desc))> \n\n `Raw Data:` {match[message]}"
severity: 2
tags: ['elastalert', 'SecurityOnion']
tags: ['playbook']
tlp: 3
status: 'New'
follow: True

View File

@@ -1,23 +1,6 @@
{% set es = salt['pillar.get']('static:masterip', '') %}
{% set hivehost = salt['pillar.get']('static:masterip', '') %}
{% set hivekey = salt['pillar.get']('static:hivekey', '') %}
es_host: {{es}}
es_port: 9200
name: Alert-Name
type: frequency
index: "*:logstash-*"
num_events: 1
timeframe:
minutes: 10
buffer_time:
minutes: 10
allow_buffer_time_overlap: true
filter:
- query:
query_string:
query: 'select from test'
alert: modules.so.thehive.TheHiveAlerter
hive_connection:
@@ -28,20 +11,22 @@ hive_proxies:
http: ''
https: ''
hive_alert_config:
title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}'
type: 'external'
source: 'SecurityOnion'
description: '`Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}'
severity: 2
tags: ['elastalert', 'SecurityOnion']
tlp: 3
status: 'New'
follow: True
caseTemplate: '5000'
hive_observable_data_mapping:
- ip: '{match[osquery][EndpointIP1]}'
- ip: '{match[osquery][EndpointIP2]}'
- other: '{match[osquery][hostIdentifier]}'
- other: '{match[osquery][hostname]}'
hive_alert_config:
title: '{rule[name]} -- {match[osquery][hostname]} -- {match[osquery][name]}'
type: 'osquery'
source: 'SecurityOnion'
description: "`Play:` https://{{es}}/playbook/issues/6000 \n\n `View Event:` <https://{{es}}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{match[_id]}'),sort:!('@timestamp',desc))> \n\n `Hostname:` __{match[osquery][hostname]}__ `Live Query:`__[Pivot Link](https://{{es}}/fleet/queries/new?host_uuids={match[osquery][LiveQuery]})__ `Pack:` __{match[osquery][name]}__ `Data:` {match[osquery][columns]}"
severity: 2
tags: ['playbook','osquery']
tlp: 3
status: 'New'
follow: True
caseTemplate: '5000'

View File

@@ -13,6 +13,12 @@ soctopussync:
- group: 939
- template: jinja
soctopuslogdir:
file.directory:
- name: /opt/so/log/soctopus
- user: 939
- group: 939
playbookrulesdir:
file.directory:
- name: /opt/so/rules/elastalert/playbook
@@ -40,17 +46,18 @@ navigatordefaultlayer:
so-soctopusimage:
cmd.run:
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-soctopus:HH1.1.1
- name: docker pull --disable-content-trust=false docker.io/soshybridhunter/so-soctopus:HH1.1.3
so-soctopus:
docker_container.running:
- require:
- so-soctopusimage
- image: docker.io/soshybridhunter/so-soctopus:HH1.1.1
- image: docker.io/soshybridhunter/so-soctopus:HH1.1.3
- hostname: soctopus
- name: so-soctopus
- binds:
- /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro
- /opt/so/log/soctopus/:/var/log/SOCtopus/:rw
- /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw
- /opt/so/conf/playbook/nav_layer_playbook.json:/etc/playbook/nav_layer_playbook.json:rw
- port_bindings:

View File

@@ -5,6 +5,11 @@
{%- set THEHIVE = salt['pillar.get']('master:thehive', '0') -%}
{%- set PLAYBOOK = salt['pillar.get']('master:playbook', '0') -%}
base:
'*':
- patch.os.schedule
- patch.needs_restarting
- motd
'G@role:so-sensor':
- ca
- ssl
@@ -40,6 +45,7 @@ base:
- suricata
- bro
- curator
- cyberchef
- elastalert
{%- if OSQUERY != 0 %}
- fleet
@@ -66,6 +72,7 @@ base:
- ca
- ssl
- common
- cyberchef
- sensoroni
- firewall
- master

View File

@@ -31,6 +31,6 @@ echo "Applying cross cluster search config..."
# Add all the storage nodes to cross cluster searching.
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).iteritems() %}
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
curl -XPUT http://{{ ES }}:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"{{ SN }}": {"skip_unavailable": "true", "seeds": ["{{ SNDATA.ip }}:9300"]}}}}}'
{%- endfor %}

3
salt/yum/packages.sls Normal file
View File

@@ -0,0 +1,3 @@
# Install yum-utils, which provides the needs-restarting command used
# by the needs_restarting.check module on CentOS minions.
install_yum_utils:
  pkg.installed:
    - name: yum-utils