More hive removal

Mike Reeves
2022-03-14 14:51:39 -04:00
parent ff30f572d7
commit b83fec6fd2
22 changed files with 14 additions and 914 deletions

View File

@@ -17,5 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-stop cortex $1
-/usr/sbin/so-start thehive $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,4 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-start thehive $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,4 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-stop cortex $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,38 +17,4 @@
 . /usr/sbin/so-common
-usage() {
-  echo "Usage: $0 <new-user-name>"
-  echo ""
-  echo "Adds a new user to Cortex. The new password will be read from STDIN."
-  exit 1
-}
-if [ $# -ne 1 ]; then
-  usage
-fi
-USER=$1
-CORTEX_KEY=$(lookup_pillar cortexorguserkey)
-CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
-CORTEX_ORG_NAME=$(lookup_pillar cortexorgname)
-CORTEX_USER=$USER
-# Read password for new user from stdin
-test -t 0
-if [[ $? == 0 ]]; then
-  echo "Enter new password:"
-fi
-read -rs CORTEX_PASS
-# Create new user in Cortex
-resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
-if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
-  echo "Successfully added user to Cortex."
-else
-  echo "Unable to add user to Cortex; user might already exist."
-  echo $resp
-  exit 2
-fi
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,41 +17,4 @@
 . /usr/sbin/so-common
-usage() {
-  echo "Usage: $0 <user-name> <true|false>"
-  echo ""
-  echo "Enables or disables a user in Cortex."
-  exit 1
-}
-if [ $# -ne 2 ]; then
-  usage
-fi
-USER=$1
-CORTEX_KEY=$(lookup_pillar cortexorguserkey)
-CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
-CORTEX_USER=$USER
-case "${2^^}" in
-  FALSE | NO | 0)
-    CORTEX_STATUS=Locked
-    ;;
-  TRUE | YES | 1)
-    CORTEX_STATUS=Ok
-    ;;
-  *)
-    usage
-    ;;
-esac
-resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
-if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
-  echo "Successfully updated user in Cortex."
-else
-  echo "Failed to update user in Cortex."
-  echo $resp
-  exit 2
-fi
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,5 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-stop thehive-es $1
-/usr/sbin/so-start thehive $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,4 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-start thehive $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,4 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-stop thehive-es $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,4 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-restart thehive $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,4 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-start thehive $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,4 +17,4 @@
 . /usr/sbin/so-common
-/usr/sbin/so-stop thehive $1
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,38 +17,4 @@
 . /usr/sbin/so-common
-usage() {
-  echo "Usage: $0 <new-user-name>"
-  echo ""
-  echo "Adds a new user to TheHive. The new password will be read from STDIN."
-  exit 1
-}
-if [ $# -ne 1 ]; then
-  usage
-fi
-USER=$1
-THEHIVE_KEY=$(lookup_pillar hivekey)
-THEHVIE_API_URL="$(lookup_pillar url_base)/thehive/api"
-THEHIVE_USER=$USER
-# Read password for new user from stdin
-test -t 0
-if [[ $? == 0 ]]; then
-  echo "Enter new password:"
-fi
-read -rs THEHIVE_PASS
-check_password_and_exit "$THEHIVE_PASS"
-# Create new user in TheHive
-resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
-if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
-  echo "Successfully added user to TheHive"
-else
-  echo "Unable to add user to TheHive; user might already exist"
-  echo $resp
-  exit 2
-fi
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,41 +17,4 @@
 . /usr/sbin/so-common
-usage() {
-  echo "Usage: $0 <user-name> <true|false>"
-  echo ""
-  echo "Enables or disables a user in TheHive."
-  exit 1
-}
-if [ $# -ne 2 ]; then
-  usage
-fi
-USER=$1
-THEHIVE_KEY=$(lookup_pillar hivekey)
-THEHVIE_API_URL="$(lookup_pillar url_base)/thehive/api"
-THEHIVE_USER=$USER
-case "${2^^}" in
-  FALSE | NO | 0)
-    THEHIVE_STATUS=Locked
-    ;;
-  TRUE | YES | 1)
-    THEHIVE_STATUS=Ok
-    ;;
-  *)
-    usage
-    ;;
-esac
-resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
-if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
-  echo "Successfully updated user in TheHive"
-else
-  echo "Failed to update user in TheHive"
-  echo "$resp"
-  exit 2
-fi
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -17,41 +17,4 @@
 . /usr/sbin/so-common
-usage() {
-  echo "Usage: $0 <user-name>"
-  echo ""
-  echo "Update password for an existing TheHive user. The new password will be read from STDIN."
-  exit 1
-}
-if [ $# -ne 1 ]; then
-  usage
-fi
-USER=$1
-THEHIVE_KEY=$(lookup_pillar hivekey)
-THEHVIE_API_URL="$(lookup_pillar url_base)/thehive/api"
-THEHIVE_USER=$USER
-# Read password for new user from stdin
-test -t 0
-if [[ $? == 0 ]]; then
-  echo "Enter new password:"
-fi
-read -rs THEHIVE_PASS
-if ! check_password "$THEHIVE_PASS"; then
-  echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
-  exit 2
-fi
-# Change password for user in TheHive
-resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user/${THEHIVE_USER}/password/set" -d "{\"password\" : \"$THEHIVE_PASS\"}")
-if [[ -z "$resp" ]]; then
-  echo "Successfully updated TheHive user password"
-else
-  echo "Unable to update TheHive user password"
-  echo $resp
-  exit 2
-fi
+echo "TheHive and its components are no longer part of Security Onion"

View File

@@ -1,219 +0,0 @@
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
{%- set HIVEPLAYSECRET = salt['pillar.get']('global:hiveplaysecret', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.
# WARNING: If you deploy your application on several servers, make sure to use the same key.
play.http.secret.key="{{ HIVEPLAYSECRET }}"
play.http.context=/thehive/
search.uri = "http://{{ MANAGERIP }}:9400"
# Elasticsearch
search {
# Name of the index
index = the_hive
# Name of the Elasticsearch cluster
cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MANAGERIP }}:9500"]
#search.uri = "http://{{ MANAGERIP }}:9500"
# Scroll keepalive
keepalive = 1m
# Size of the page for scroll
pagesize = 50
# Number of shards
nbshards = 5
# Number of replicas
nbreplicas = 0
# Arbitrary settings
settings {
# Maximum number of nested fields
mapping.nested_fields.limit = 100
}
### XPack SSL configuration
# Username for XPack authentication
#username
# Password for XPack authentication
#password
# Enable SSL to connect to ElasticSearch
ssl.enabled = false
# Path to certificate authority file
#ssl.ca
# Path to certificate file
#ssl.certificate
# Path to key file
#ssl.key
### SearchGuard configuration
# Path to JKS file containing client certificate
#guard.keyStore.path
# Password of the keystore
#guard.keyStore.password
# Path to JKS file containing certificate authorities
#guard.trustStore.path
## Password of the truststore
#guard.trustStore.password
# Enforce hostname verification
#guard.hostVerification
# If hostname verification is enabled specify if hostname should be resolved
#guard.hostVerificationResolveHostname
}
# Authentication
auth {
# "provider" parameter contains authentication provider. It can be multi-valued (useful for migration)
# available auth types are:
# services.LocalAuthSrv : passwords are stored in user entity (in Elasticsearch). No configuration is required.
# ad : use ActiveDirectory to authenticate users. Configuration is under "auth.ad" key
# ldap : use LDAP to authenticate users. Configuration is under "auth.ldap" key
provider = [local]
# By default, basic authentication is disabled. You can enable it by setting "method.basic" to true.
#method.basic = true
ad {
# The Windows domain name in DNS format. This parameter is required if you do not use
# 'serverNames' below.
#domainFQDN = "mydomain.local"
# Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN
# above. If this parameter is not set, TheHive uses 'domainFQDN'.
#serverNames = [ad1.mydomain.local, ad2.mydomain.local]
# The Windows domain name using short format. This parameter is required.
#domainName = "MYDOMAIN"
# If 'true', use SSL to connect to the domain controller.
#useSSL = true
}
ldap {
# The LDAP server name or address. The port can be specified using the 'host:port'
# syntax. This parameter is required if you don't use 'serverNames' below.
#serverName = "ldap.mydomain.local:389"
# If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead.
#serverNames = [ldap1.mydomain.local, ldap2.mydomain.local]
# Account to use to bind to the LDAP server. This parameter is required.
#bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local"
# Password of the binding account. This parameter is required.
#bindPW = "***secret*password***"
# Base DN to search users. This parameter is required.
#baseDN = "ou=users,dc=mydomain,dc=local"
# Filter to search user in the directory server. Please note that {0} is replaced
# by the actual user name. This parameter is required.
#filter = "(cn={0})"
# If 'true', use SSL to connect to the LDAP directory server.
#useSSL = true
}
}
# Maximum time between two requests without requesting authentication
session {
warning = 5m
inactivity = 1h
}
# Max textual content length
play.http.parser.maxMemoryBuffer= 1M
# Max file size
play.http.parser.maxDiskBuffer = 1G
# Cortex
# TheHive can connect to one or multiple Cortex instances. Give each
# Cortex instance a name and specify the associated URL.
#
# In order to use Cortex, first you need to enable the Cortex module by uncommenting the next line
play.modules.enabled += connectors.cortex.CortexConnector
cortex {
"CORTEX-SERVER-ID" {
url = "http://{{ MANAGERIP }}:9001/cortex/"
key = "{{ CORTEXKEY }}"
# # HTTP client configuration (SSL and proxy)
# ws {}
}
}
# MISP
# TheHive can connect to one or multiple MISP instances. Give each MISP
# instance a name and specify the associated Authkey that must be used
# to poll events, the case template that should be used by default when
# importing events as well as the tags that must be added to cases upon
# import.
# Prior to configuring the integration with a MISP instance, you must
# enable the MISP connector. This will allow you to import events to
# and/or export cases to the MISP instance(s).
#play.modules.enabled += connectors.misp.MispConnector
misp {
# Interval between consecutive MISP event imports in hours (h) or
# minutes (m).
interval = 1h
#"MISP-SERVER-ID" {
# # MISP connection configuration requires at least an url and a key. The key must
# # be linked with a sync account on MISP.
# url = ""
# key = ""
#
# # Name of the case template in TheHive that shall be used to import
# # MISP events as cases by default.
# caseTemplate = "<Template_Name_goes_here>"
#
# # Optional tags to add to each observable imported from an event
# # available on this instance.
# tags = ["misp-server-id"]
#
# ## MISP event filters
# # MISP filters is used to exclude events from the import.
# # Filter criteria are:
# # The number of attribute
# max-attributes = 1000
# # The size of its JSON representation
# max-size = 1 MiB
# # The age of the last publish date
# max-age = 7 days
# # Organization and tags
# exclusion {
# organisation = ["bad organisation", "other organisations"]
# tags = ["tag1", "tag2"]
# }
#
# ## HTTP client configuration (SSL and proxy)
# # Truststore to use to validate the X.509 certificate of the MISP
# # instance if the default truststore is not sufficient.
# # Proxy can also be used
# ws {
# ssl.trustManager.stores = [ {
# path = /path/to/truststore.jks
# } ]
# proxy {
# host = proxy.mydomain.org
# port = 3128
# }
# }
#
# # MISP purpose defines if this instance can be used to import events (ImportOnly), export cases (ExportOnly) or both (ImportAndExport)
# # Default is ImportAndExport
# purpose = ImportAndExport
#} ## <-- Uncomment to complete the configuration
}
webhooks {
NodeRedWebHook {
url = "http://{{ MANAGERIP }}:1880/thehive"
}
#SOCtopusWebHook {
# url = "http://{{ MANAGERIP }}:7000/enrich"
#}
}

View File

@@ -1,148 +0,0 @@
{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
{%- set CORTEXPLAYSECRET = salt['pillar.get']('global:cortexplaysecret', '') %}
# Secret Key
# The secret key is used to secure cryptographic functions.
# WARNING: If you deploy your application on several servers, make sure to use the same key.
play.http.secret.key="{{ CORTEXPLAYSECRET }}"
play.http.context=/cortex/
pidfile.path = "/dev/null"
search.uri = "http://{{ MANAGERIP }}:9400"
# Elasticsearch
search {
# Name of the index
index = cortex
# Name of the Elasticsearch cluster
cluster = thehive
# Address of the Elasticsearch instance
host = ["{{ MANAGERIP }}:9500"]
# Scroll keepalive
keepalive = 1m
# Size of the page for scroll
pagesize = 50
# Number of shards
nbshards = 5
# Number of replicas
nbreplicas = 0
# Arbitrary settings
settings {
# Maximum number of nested fields
mapping.nested_fields.limit = 100
}
## Authentication configuration
#search.username = ""
#search.password = ""
## SSL configuration
#search.keyStore {
# path = "/path/to/keystore"
# type = "JKS" # or PKCS12
# password = "keystore-password"
#}
#search.trustStore {
# path = "/path/to/trustStore"
# type = "JKS" # or PKCS12
# password = "trustStore-password"
#}
}
## Cache
#
# If an analyzer is executed against the same observable, the previous report can be returned without re-executing the
# analyzer. The cache is used only if the second job occurs within cache.job (the default is 10 minutes).
cache.job = 10 minutes
## Authentication
auth {
# "provider" parameter contains the authentication provider(s). It can be multi-valued, which is useful
# for migration.
# The available auth types are:
# - services.LocalAuthSrv : passwords are stored in the user entity within ElasticSearch). No
# configuration are required.
# - ad : use ActiveDirectory to authenticate users. The associated configuration shall be done in
# the "ad" section below.
# - ldap : use LDAP to authenticate users. The associated configuration shall be done in the
# "ldap" section below.
provider = [local]
ad {
# The Windows domain name in DNS format. This parameter is required if you do not use
# 'serverNames' below.
#domainFQDN = "mydomain.local"
# Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN
# above. If this parameter is not set, TheHive uses 'domainFQDN'.
#serverNames = [ad1.mydomain.local, ad2.mydomain.local]
# The Windows domain name using short format. This parameter is required.
#domainName = "MYDOMAIN"
# If 'true', use SSL to connect to the domain controller.
#useSSL = true
}
ldap {
# The LDAP server name or address. The port can be specified using the 'host:port'
# syntax. This parameter is required if you don't use 'serverNames' below.
#serverName = "ldap.mydomain.local:389"
# If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead.
#serverNames = [ldap1.mydomain.local, ldap2.mydomain.local]
# Account to use to bind to the LDAP server. This parameter is required.
#bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local"
# Password of the binding account. This parameter is required.
#bindPW = "***secret*password***"
# Base DN to search users. This parameter is required.
#baseDN = "ou=users,dc=mydomain,dc=local"
# Filter to search user in the directory server. Please note that {0} is replaced
# by the actual user name. This parameter is required.
#filter = "(cn={0})"
# If 'true', use SSL to connect to the LDAP directory server.
#useSSL = true
}
}
## ANALYZERS
#
analyzer {
# Absolute path where you have pulled the Cortex-Analyzers repository.
path = ["/Cortex-Analyzers/analyzers"]
# Sane defaults. Do not change unless you know what you are doing.
fork-join-executor {
# Min number of threads available for analysis.
parallelism-min = 2
# Parallelism (threads) ... ceil(available processors * factor).
parallelism-factor = 2.0
# Max number of threads available for analysis.
parallelism-max = 4
}
}
## RESPONDERS
##
responder {
# Directory that holds responders
urls = ["/Cortex-Analyzers/responders", "/custom-responders"]
fork-join-executor {
# Min number of threads available for analyze
parallelism-min = 2
# Parallelism (threads) ... ceil(available processors * factor)
parallelism-factor = 2.0
# Max number of threads available for analyze
parallelism-max = 4
}
}
# It's the end my friend. Happy hunting!

View File

@@ -1,17 +0,0 @@
cluster.name: thehive
network.host: 0.0.0.0
discovery.zen.minimum_master_nodes: 1
# This is a test -- if this is here, then the volume is mounted correctly.
path.logs: /var/log/elasticsearch
action.destructive_requires_name: true
discovery.type: single-node
script.allowed_types: inline
transport.bind_host: 0.0.0.0
transport.publish_host: 0.0.0.0
transport.publish_port: 9500
http.host: 0.0.0.0
http.port: 9400
transport.tcp.port: 9500
transport.host: 0.0.0.0
thread_pool.search.queue_size: 100000
thread_pool.write.queue_size: 100000

View File

@@ -1,20 +0,0 @@
status = error
#appender.console.type = Console
#appender.console.name = console
#appender.console.layout.type = PatternLayout
#appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
#rootLogger.level = info
#rootLogger.appenderRef.console.ref = console
# This is a test -- if this here, then the volume is mounted correctly.
appender.rolling.type = RollingFile
appender.rolling.name = rolling
appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log
appender.rolling.layout.type = PatternLayout
appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n
appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log
appender.rolling.policies.type = Policies
appender.rolling.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling.policies.time.interval = 1
appender.rolling.policies.time.modulate = true
rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling

View File

@@ -1,180 +0,0 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %}
{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %}
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{% set MANAGER = salt['grains.get']('master') %}
thehiveconfdir:
  file.directory:
    - name: /opt/so/conf/thehive/etc
    - makedirs: True
    - user: 939
    - group: 939

thehivelogdir:
  file.directory:
    - name: /opt/so/log/thehive
    - makedirs: True
    - user: 939
    - group: 939

thehiveconf:
  file.recurse:
    - name: /opt/so/conf/thehive/etc
    - source: salt://thehive/etc
    - user: 939
    - group: 939
    - template: jinja

cortexconfdir:
  file.directory:
    - name: /opt/so/conf/cortex
    - makedirs: True
    - user: 939
    - group: 939

cortexlogdir:
  file.directory:
    - name: /opt/so/log/cortex
    - makedirs: True
    - user: 939
    - group: 939

cortexconf:
  file.recurse:
    - name: /opt/so/conf/cortex
    - source: salt://thehive/etc
    - user: 939
    - group: 939
    - template: jinja

cortexanalyzers:
  file.directory:
    - name: /opt/so/conf/cortex/custom-analyzers
    - user: 939
    - group: 939
    - template: jinja

cortexresponders:
  file.directory:
    - name: /opt/so/conf/cortex/custom-responders
    - user: 939
    - group: 939
    - template: jinja

# Install Elasticsearch
# Made directory for ES data to live in
thehiveesdata:
  file.directory:
    - name: /nsm/thehive/esdata
    - makedirs: True
    - user: 939
    - group: 939

thehive_elasticsearch_yml:
  file.exists:
    - name: /opt/so/conf/thehive/etc/es/elasticsearch.yml

log4j2_properties:
  file.exists:
    - name: /opt/so/conf/thehive/etc/es/log4j2.properties

so-thehive-es:
  docker_container.running:
    - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }}
    - hostname: so-thehive-es
    - name: so-thehive-es
    - user: 939
    - interactive: True
    - tty: True
    - binds:
      - /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw
      - /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro
      - /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro
      - /opt/so/log/thehive:/var/log/elasticsearch:rw
    - environment:
      - ES_JAVA_OPTS=-Xms512m -Xmx512m -Dlog4j2.formatMsgNoLookups=true
    - port_bindings:
      - 0.0.0.0:9400:9400
      - 0.0.0.0:9500:9500
    - require:
      - file: thehive_elasticsearch_yml
      - file: log4j2_properties

append_so-thehive-es_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-thehive-es

cortex_application_conf:
  file.exists:
    - name: /opt/so/conf/thehive/etc/cortex-application.conf

application_conf:
  file.exists:
    - name: /opt/so/conf/thehive/etc/application.conf

# Install Cortex
so-cortex:
  docker_container.running:
    - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-cortex:{{ VERSION }}
    - hostname: so-cortex
    - name: so-cortex
    - user: 939
    - binds:
      - /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro
      - /opt/so/conf/cortex/custom-analyzers:/custom-analyzers:ro
      - /opt/so/conf/cortex/custom-responders:/custom-responders:ro
    - port_bindings:
      - 0.0.0.0:9001:9001
    - require:
      - file: cortex_application_conf

append_so-cortex_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-cortex

cortexscript:
  cmd.script:
    - source: salt://thehive/scripts/cortex_init
    - cwd: /opt/so
    - template: jinja
    - hide_output: False

so-thehive:
  docker_container.running:
    - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive:{{ VERSION }}
    - environment:
      - ELASTICSEARCH_HOST={{ MANAGERIP }}
    - hostname: so-thehive
    - name: so-thehive
    - user: 939
    - binds:
      - /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro
    - port_bindings:
      - 0.0.0.0:9000:9000
    - require:
      - file: application_conf

append_so-thehive_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-thehive

thehivescript:
  cmd.script:
    - source: salt://thehive/scripts/hive_init
    - cwd: /opt/so
    - template: jinja
    - hide_output: False

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}

View File

@@ -1,75 +0,0 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
# {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %}
# {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %}
# {%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') %}
# {%- set CORTEXORGNAME = salt['pillar.get']('global:cortexorgname', '') %}
# {%- set CORTEXORGUSER = salt['pillar.get']('global:cortexorguser', 'soadmin') %}
# {%- set CORTEXORGUSERKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
. /usr/sbin/so-common
default_salt_dir=/opt/so/saltstack/default
cortex_clean(){
sed -i '/^ cortexuser:/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/^ cortexpassword:/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/^ cortexorguser:/d' /opt/so/saltstack/local/pillar/global.sls
}
cortex_init(){
CORTEX_URL="http://{{MANAGERIP}}:9001/cortex/"
CORTEX_API_URL="${CORTEX_URL}api"
CORTEX_USER="{{CORTEXUSER}}"
CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
CORTEX_KEY="{{CORTEXKEY}}"
CORTEX_ORG_NAME="{{CORTEXORGNAME}}"
CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup"
CORTEX_ORG_USER="{{CORTEXORGUSER}}"
CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf"
if wait_for_web_response $CORTEX_URL "Cortex" 120; then
# Migrate DB
curl -sk -XPOST -L "$CORTEX_API_URL/maintenance/migrate"
# Create intial Cortex superadmin
curl -sk -L "$CORTEX_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
# Create user-supplied org
curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
# Create user-supplied org user
curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
# Enable URLScan.io Analyzer
curl -sv -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
# Enable Cert PassiveDNS Analyzer
curl -sv -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
# Revoke $CORTEX_USER key
curl -sk -XDELETE -H "Authorization: Bearer $CORTEX_KEY" -L "$CORTEX_API_URL/user/$CORTEX_USER/key"
# Update SOCtopus config with apikey value
#sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG
touch /opt/so/state/cortex.txt
else
echo "We experienced an issue connecting to Cortex!"
exit 1
fi
}
if [ -f /opt/so/state/cortex.txt ]; then
cortex_clean
exit 0
else
if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"' 120; then
cortex_init
cortex_clean
else
echo "TheHive Elasticsearch server is not ready; unable to proceed with Cortex init."
exit 1
fi
fi

View File

@@ -1,53 +0,0 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
# {%- set THEHIVEUSER = salt['pillar.get']('global:hiveuser', 'hiveadmin') %}
# {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %}
# {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
. /usr/sbin/so-common
thehive_clean(){
sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/global.sls
}
thehive_init(){
THEHIVE_URL="http://{{MANAGERIP}}:9000/thehive/"
THEHIVE_API_URL="${THEHIVE_URL}api"
THEHIVE_USER="{{THEHIVEUSER}}"
THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}"
THEHIVE_KEY="{{THEHIVEKEY}}"
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
echo -n "Waiting for TheHive..."
if wait_for_web_response $THEHIVE_URL "TheHive" 120; then
# Migrate DB
curl -sk -XPOST -L "$THEHIVE_API_URL/maintenance/migrate"
# Create intial TheHive user
curl -sk -L "$THEHIVE_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}"
# Pre-load custom fields
#
# reputation
curl -sk -L "$THEHIVE_API_URL/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
touch /opt/so/state/thehive.txt
else
echo "We experienced an issue connecting to TheHive!"
exit 1
fi
}
if [ -f /opt/so/state/thehive.txt ]; then
thehive_clean
exit 0
else
if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"' 120; then
thehive_init
thehive_clean
else
echo "TheHive Elasticsearch server is not ready; unable to proceed with TheHive init."
exit 1
fi
fi

View File

@@ -1,6 +1,5 @@
 {% set ZEEKVER = salt['pillar.get']('global:mdengine', '') %}
 {% set WAZUH = salt['pillar.get']('global:wazuh', '0') %}
-{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
 {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
 {% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
 {% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
@@ -143,9 +142,6 @@ base:
     - utility
     - schedule
     - soctopus
-    {%- if THEHIVE != 0 %}
-    - thehive
-    {%- endif %}
     {%- if PLAYBOOK != 0 %}
     - playbook
     - redis
@@ -210,9 +206,6 @@ base:
     - fleet.install_package
     {%- endif %}
     - soctopus
-    {%- if THEHIVE != 0 %}
-    - thehive
-    {%- endif %}
     {%- if PLAYBOOK != 0 %}
     - playbook
     {%- endif %}