m0duspwnens
2020-12-23 14:53:27 -05:00
10 changed files with 237 additions and 154 deletions

View File

@@ -139,4 +139,33 @@ fail() {
get_random_value() {
length=${1:-20}
head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
wait_for_web_response() {
url=$1
expected=$2
maxAttempts=${3:-300}
logfile=/root/wait_for_web_response.log
attempt=0
while [[ $attempt -lt $maxAttempts ]]; do
attempt=$((attempt+1))
echo "Waiting for value '$expected' at '$url' ($attempt/$maxAttempts)"
result=$(curl -ks -L $url)
exitcode=$?
echo "--------------------------------------------------" >> $logfile
echo "$(date) - Checking web URL: $url ($attempt/$maxAttempts)" >> $logfile
echo "$result" >> $logfile
echo "exit code=$exitcode" >> $logfile
echo "" >> $logfile
if [[ $exitcode -eq 0 && "$result" =~ $expected ]]; then
echo "Received expected response; proceeding."
return 0
fi
echo "Server is not ready"
sleep 1
done
echo "Server still not ready after $maxAttempts attempts; giving up."
return 1
}
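The hive and cortex setup scripts later in this commit call this helper to gate their init steps; a condensed usage sketch taken from those scripts:

# Block until the Elasticsearch status endpoint reports green, then proceed
if wait_for_web_response http://{{MANAGERIP}}:9400 '"status":"green"'; then
  thehive_init
else
  echo "TheHive Elasticsearch server is not ready; unable to proceed."
fi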

View File

@@ -96,7 +96,7 @@ rule_prompt(){
echo "-----------------------------------"
echo
while [ -z "$RULE_NAME" ]; do
read -p "Please enter the rule filename you want to test (filename only, no path): " -e RULE_NAME
read -p "Choose a rule to test from the list above (must be typed exactly as shown above): " -e RULE_NAME
done
}

View File

@@ -21,25 +21,33 @@ import yaml
hostgroupsFilename = "/opt/so/saltstack/local/salt/firewall/hostgroups.local.yaml"
portgroupsFilename = "/opt/so/saltstack/local/salt/firewall/portgroups.local.yaml"
defaultPortgroupsFilename = "/opt/so/saltstack/default/salt/firewall/portgroups.yaml"
supportedProtocols = ['tcp', 'udp']
def showUsage(args):
def showUsage(options, args):
print('Usage: {} [OPTIONS] <COMMAND> [ARGS...]'.format(sys.argv[0]))
print(' Options:')
print(' --apply - After updating the firewall configuration files, apply the new firewall state')
print(' --defaultports - Read port groups from default configuration files instead of local configuration.')
print('')
print(' Available commands:')
print(' help - Prints this usage information.')
print(' includedhosts - Lists the IPs included in the given group. Args: <GROUP_NAME>')
print(' excludedhosts - Lists the IPs excluded from the given group. Args: <GROUP_NAME>')
print(' includehost - Includes the given IP in the given group. Args: <GROUP_NAME> <IP>')
print(' excludehost - Excludes the given IP from the given group. Args: <GROUP_NAME> <IP>')
print(' removehost - Removes an excluded IP from the given group. Args: <GROUP_NAME> <IP>')
print(' addhostgroup - Adds a new, custom host group. Args: <GROUP_NAME>')
print(' listports - Lists ports in the given group and protocol. Args: <GROUP_NAME> <PORT_PROTOCOL>')
print(' addport - Adds a PORT to the given group. Args: <GROUP_NAME> <PORT_PROTOCOL> <PORT>')
print(' removeport - Removes a PORT from the given group. Args: <GROUP_NAME> <PORT_PROTOCOL> <PORT>')
print(' addportgroup - Adds a new, custom port group. Args: <GROUP_NAME>')
print(' General commands:')
print(' help - Prints this usage information.')
print('')
print(' Host commands:')
print(' listhostgroups - Lists the known host groups.')
print(' includedhosts - Lists the IPs included in the given group. Args: <GROUP_NAME>')
print(' excludedhosts - Lists the IPs excluded from the given group. Args: <GROUP_NAME>')
print(' includehost - Includes the given IP in the given group. Args: <GROUP_NAME> <IP>')
print(' excludehost - Excludes the given IP from the given group. Args: <GROUP_NAME> <IP>')
print(' removehost - Removes an excluded IP from the given group. Args: <GROUP_NAME> <IP>')
print(' addhostgroup - Adds a new, custom host group. Args: <GROUP_NAME>')
print('')
print(' Port commands:')
print(' listportgroups - Lists the known port groups.')
print(' listports - Lists ports in the given group and protocol. Args: <GROUP_NAME> <PORT_PROTOCOL>')
print(' addport - Adds a PORT to the given group. Args: <GROUP_NAME> <PORT_PROTOCOL> <PORT>')
print(' removeport - Removes a PORT from the given group. Args: <GROUP_NAME> <PORT_PROTOCOL> <PORT>')
print(' addportgroup - Adds a new, custom port group. Args: <GROUP_NAME>')
print('')
print(' Where:')
print(' GROUP_NAME - The name of an alias group (Ex: analyst)')
@@ -48,6 +56,15 @@ def showUsage(args):
print(' PORT - Either a single numeric port (Ex: 443), or a port range (Ex: 8000:8002).')
sys.exit(1)
def checkDefaultPortsOption(options):
global portgroupsFilename
if "--defaultports" in options:
portgroupsFilename = defaultPortgroupsFilename
def checkApplyOption(options):
if "--apply" in options:
return apply()
def loadYaml(filename):
file = open(filename, "r")
return yaml.load(file.read())
@@ -56,6 +73,14 @@ def writeYaml(filename, content):
file = open(filename, "w")
return yaml.dump(content, file)
def listHostGroups():
content = loadYaml(hostgroupsFilename)
hostgroups = content['firewall']['hostgroups']
if hostgroups is not None:
for group in hostgroups:
print(group)
return 0
def listIps(name, mode):
content = loadYaml(hostgroupsFilename)
if name not in content['firewall']['hostgroups']:
@@ -111,10 +136,18 @@ def createProtocolMap():
map[protocol] = []
return map
def addhostgroup(args):
def listPortGroups():
content = loadYaml(portgroupsFilename)
portgroups = content['firewall']['aliases']['ports']
if portgroups is not None:
for group in portgroups:
print(group)
return 0
def addhostgroup(options, args):
if len(args) != 1:
print('Missing host group name argument', file=sys.stderr)
showUsage(args)
showUsage(options, args)
name = args[0]
content = loadYaml(hostgroupsFilename)
@@ -125,10 +158,17 @@ def addhostgroup(args):
writeYaml(hostgroupsFilename, content)
return 0
def addportgroup(args):
def listportgroups(options, args):
if len(args) != 0:
print('Unexpected arguments', file=sys.stderr)
showUsage(options, args)
checkDefaultPortsOption(options)
return listPortGroups()
def addportgroup(options, args):
if len(args) != 1:
print('Missing port group name argument', file=sys.stderr)
showUsage(args)
showUsage(options, args)
name = args[0]
content = loadYaml(portgroupsFilename)
@@ -143,11 +183,12 @@ def addportgroup(args):
writeYaml(portgroupsFilename, content)
return 0
def listports(args):
def listports(options, args):
if len(args) != 2:
print('Missing port group name or port protocol', file=sys.stderr)
showUsage(args)
showUsage(options, args)
checkDefaultPortsOption(options)
name = args[0]
protocol = args[1]
if protocol not in supportedProtocols:
@@ -162,16 +203,19 @@ def listports(args):
if name not in ports:
print('Port group does not exist', file=sys.stderr)
return 3
if protocol not in ports[name]:
print('Port group does not contain protocol', file=sys.stderr)
return 3
ports = ports[name][protocol]
if ports is not None:
for port in ports:
print(port)
return 0
def addport(args):
def addport(options, args):
if len(args) != 3:
print('Missing port group name or port protocol, or port argument', file=sys.stderr)
showUsage(args)
showUsage(options, args)
name = args[0]
protocol = args[1]
@@ -197,12 +241,13 @@ def addport(args):
return 3
ports.append(port)
writeYaml(portgroupsFilename, content)
return 0
code = checkApplyOption(options)
return code
def removeport(args):
def removeport(options, args):
if len(args) != 3:
print('Missing port group name or port protocol, or port argument', file=sys.stderr)
showUsage(args)
showUsage(options, args)
name = args[0]
protocol = args[1]
@@ -225,43 +270,60 @@ def removeport(args):
return 3
ports.remove(port)
writeYaml(portgroupsFilename, content)
return 0
code = checkApplyOption(options)
return code
def includedhosts(args):
def listhostgroups(options, args):
if len(args) != 0:
print('Unexpected arguments', file=sys.stderr)
showUsage(options, args)
return listHostGroups()
def includedhosts(options, args):
if len(args) != 1:
print('Missing host group name argument', file=sys.stderr)
showUsage(args)
showUsage(options, args)
return listIps(args[0], 'insert')
def excludedhosts(args):
def excludedhosts(options, args):
if len(args) != 1:
print('Missing host group name argument', file=sys.stderr)
showUsage(args)
showUsage(options, args)
return listIps(args[0], 'delete')
def includehost(args):
def includehost(options, args):
if len(args) != 2:
print('Missing host group name or ip argument', file=sys.stderr)
showUsage(args)
showUsage(options, args)
result = addIp(args[0], args[1], 'insert')
if result == 0:
removeIp(args[0], args[1], 'delete', True)
return result
code = result
if code == 0:
code = checkApplyOption(options)
return code
def excludehost(args):
def excludehost(options, args):
if len(args) != 2:
print('Missing host group name or ip argument', file=sys.stderr)
showUsage(args)
showUsage(options, args)
result = addIp(args[0], args[1], 'delete')
if result == 0:
removeIp(args[0], args[1], 'insert', True)
return result
code = result
if code == 0:
code = checkApplyOption(options)
return code
def removehost(args):
def removehost(options, args):
if len(args) != 2:
print('Missing host group name or ip argument', file=sys.stderr)
showUsage(args)
return removeIp(args[0], args[1], 'delete')
showUsage(options, args)
code = removeIp(args[0], args[1], 'delete')
if code == 0:
code = checkApplyOption(options)
return code
def apply():
proc = subprocess.run(['salt-call', 'state.apply', 'firewall', 'queue=True'])
@@ -276,28 +338,26 @@ def main():
args.remove(option)
if len(args) == 0:
showUsage(None)
showUsage(options, None)
commands = {
"help": showUsage,
"listhostgroups": listhostgroups,
"includedhosts": includedhosts,
"excludedhosts": excludedhosts,
"includehost": includehost,
"excludehost": excludehost,
"removehost": removehost,
"listportgroups": listportgroups,
"listports": listports,
"addport": addport,
"removeport": removeport,
"addhostgroup": addhostgroup,
"addportgroup": addportgroup
}
cmd = commands.get(args[0], showUsage)
code = cmd(args[1:])
if code == 0 and "--apply" in options:
code = apply()
code = cmd(options, args[1:])
sys.exit(code)
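With the dispatch above now passing options through to each handler, --apply runs only after the requested change succeeds. A hypothetical invocation follows (the installed script name, shown here as so-firewall, is an assumption; it does not appear in this diff):

# List known host groups, then include an analyst IP and immediately apply the new firewall state
so-firewall listhostgroups
so-firewall --apply includehost analyst 192.168.1.50
# List port groups from the default configuration instead of the local one
so-firewall --defaultports listportgroups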

View File

@@ -20,6 +20,7 @@ echo "Starting to check for yara rule updates at $(date)..."
output_dir="/opt/so/saltstack/default/salt/strelka/rules"
mkdir -p $output_dir
repos="$output_dir/repos.txt"
ignorefile="$output_dir/ignore.txt"
@@ -95,55 +96,56 @@ clone_dir="/tmp"
if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
while IFS= read -r repo; do
if ! $(echo "$repo" | grep -qE '^#'); then
# Remove old repo if it exists because of a previous error condition or unexpected disruption
repo_name=`echo $repo | awk -F '/' '{print $NF}'`
[ -d $repo_name ] && rm -rf $repo_name
# Clone repo and make appropriate directories for rules
git clone $repo $clone_dir/$repo_name
echo "Analyzing rules from $clone_dir/$repo_name..."
mkdir -p $output_dir/$repo_name
[ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
# Copy over rules
for i in $(find $clone_dir/$repo_name -name "*.yar*"); do
rule_name=$(echo $i | awk -F '/' '{print $NF}')
repo_sum=$(sha256sum $i | awk '{print $1}')
# Check rules against those in ignore list -- don't copy if ignored.
if ! grep -iq $rule_name $ignorefile; then
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
# For existing rules, check to see if they need to be updated, by comparing checksums
if [ $existing_rules -gt 0 ];then
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
if [ "$repo_sum" != "$local_sum" ]; then
echo "Checksums do not match!"
echo "Updating $rule_name..."
cp $i $output_dir/$repo_name;
((updatecounter++))
fi
else
# If rule doesn't exist already, we'll add it
echo "Adding new rule: $rule_name..."
cp $i $output_dir/$repo_name
((newcounter++))
fi
fi;
done
# Check to see if we have any old rules that need to be removed
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
is_repo_rule=$(find $clone_dir/$repo_name -name "$i" | wc -l)
if [ $is_repo_rule -eq 0 ]; then
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
rm $output_dir/$repo_name/$i
((deletecounter++))
fi
done
rm -rf $clone_dir/$repo_name
fi
done < $repos
echo "Done!"

View File

@@ -55,6 +55,12 @@ strelkarules:
- source: salt://strelka/rules
- user: 939
- group: 939
strelkarepos:
file.managed:
- name: /opt/so/saltstack/default/salt/strelka/rules/repos.txt
- source: salt://strelka/rules/repos.txt.jinja
- template: jinja
{%- endif %}
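A sketch of how this state would be exercised after changing the strelka:repos pillar; the state name and invocation are assumptions modeled on the salt-call pattern used elsewhere in this diff (the firewall apply() above):

# Hypothetical: re-render repos.txt from the pillar, then inspect the result
salt-call state.apply strelka queue=True
cat /opt/so/saltstack/default/salt/strelka/rules/repos.txt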

View File

@@ -0,0 +1,4 @@
# DO NOT EDIT THIS FILE! Strelka YARA rule repos are stored here from the strelka.repos pillar section
{%- for repo in salt['pillar.get']('strelka:repos', {}) %}
{{ repo }}
{%- endfor %}
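Given the default pillar value added at the end of this commit, this template renders roughly as follows (a sketch, assuming only the single default repo is defined in strelka:repos):

# DO NOT EDIT THIS FILE! Strelka YARA rule repos are stored here from the strelka.repos pillar section
https://github.com/Neo23x0/signature-base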

View File

@@ -1,4 +1,5 @@
#!/bin/bash
# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %}
# {%- set URLBASE = salt['pillar.get']('global:url_base', '') %}
# {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %}
# {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %}
@@ -7,6 +8,8 @@
# {%- set CORTEXORGUSER = salt['pillar.get']('global:cortexorguser', 'soadmin') %}
# {%- set CORTEXORGUSERKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
. /usr/sbin/so-common
default_salt_dir=/opt/so/saltstack/default
cortex_clean(){
@@ -16,8 +19,8 @@ cortex_clean(){
}
cortex_init(){
sleep 60
CORTEX_API_URL="{{URLBASE}}/cortex/api"
CORTEX_URL="{{URLBASE}}/cortex"
CORTEX_API_URL="{{CORTEX_URL}}/api"
CORTEX_USER="{{CORTEXUSER}}"
CORTEX_PASSWORD="{{CORTEXPASSWORD}}"
CORTEX_KEY="{{CORTEXKEY}}"
@@ -27,47 +30,45 @@ cortex_init(){
CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}"
SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf"
if wait_for_web_response https://$CORTEX_URL "Cortex"; then
# Migrate DB
curl -v -k -XPOST -L "https://$CORTEX_API_URL/maintenance/migrate"
# Create initial Cortex superadmin
curl -v -k -L "https://$CORTEX_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}"
# Create user-supplied org
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}"
# Create user-supplied org user
curl -k -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }"
# Enable URLScan.io Analyzer
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}'
# Enable Cert PassiveDNS Analyzer
curl -v -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}'
# Revoke $CORTEX_USER key
curl -k -XDELETE -H "Authorization: Bearer $CORTEX_KEY" -L "https://$CORTEX_API_URL/user/$CORTEX_USER/key"
# Update SOCtopus config with apikey value
#sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG
touch /opt/so/state/cortex.txt
else
echo "We experienced an issue connecting to Cortex!"
fi
}
if [ -f /opt/so/state/cortex.txt ]; then
cortex_clean
exit 0
else
rm -f garbage_file
while ! wget -O garbage_file {{URLBASE}}:9500 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
sleep 1
done
rm -f garbage_file
sleep 5
cortex_init
cortex_clean
if wait_for_web_response http://{{MANAGERIP}}:9400 '"status":"green"'; then
cortex_init
cortex_clean
else
echo "TheHive Elasticsearch server is not ready; unable to proceed with cortex init."
fi
fi

View File

@@ -5,13 +5,14 @@
# {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %}
# {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %}
. /usr/sbin/so-common
thehive_clean(){
sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/global.sls
}
thehive_init(){
sleep 120
THEHIVE_URL="{{URLBASE}}/thehive"
THEHIVE_API_URL="$THEHIVE_URL/api"
THEHIVE_USER="{{THEHIVEUSER}}"
@@ -20,23 +21,7 @@ thehive_init(){
SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf"
echo -n "Waiting for TheHive..."
COUNT=0
THEHIVE_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl --output /dev/null --silent --head --fail -k "https://$THEHIVE_URL"
if [ $? -eq 0 ]; then
THEHIVE_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
if wait_for_web_response https://$THEHIVE_URL "TheHive"; then
# Migrate DB
curl -v -k -XPOST -L "https://$THEHIVE_API_URL/maintenance/migrate"
@@ -47,7 +32,6 @@ thehive_init(){
#
# reputation
curl -v -k -L "https://$THEHIVE_API_URL/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}"
touch /opt/so/state/thehive.txt
else
@@ -59,15 +43,10 @@ if [ -f /opt/so/state/thehive.txt ]; then
thehive_clean
exit 0
else
rm -f garbage_file
while ! wget -O garbage_file {{MANAGERIP}}:9400 2>/dev/null
do
echo "Waiting for Elasticsearch..."
rm -f garbage_file
sleep 1
done
rm -f garbage_file
sleep 5
thehive_init
thehive_clean
if wait_for_web_response http://{{MANAGERIP}}:9400 '"status":"green"'; then
thehive_init
thehive_clean
else
echo "TheHive Elasticsearch server is not ready; unable to proceed with hive init."
fi
fi

View File

@@ -62,8 +62,8 @@ OSQUERY=1
# PATCHSCHEDULEHOURS=
PATCHSCHEDULENAME=auto
PLAYBOOK=1
# REDIRECTHOST=
REDIRECTINFO=HOSTNAME
REDIRECTHOST=securityonion
REDIRECTINFO=OTHER
RULESETUP=ETOPEN
# SHARDCOUNT=
SKIP_REBOOT=0

View File

@@ -1260,6 +1260,8 @@ manager_global() {
"strelka:"\
" enabled: $STRELKA"\
" rules: 1"\
" repos:"\
" - https://github.com/Neo23x0/signature-base"\
"curator:"\
" hot_warm: False"\
"elastic:"\