diff --git a/salt/logstash/conf/conf.enabled.txt.so-helix b/salt/logstash/conf/conf.enabled.txt.so-helix
new file mode 100644
index 000000000..6464496fa
--- /dev/null
+++ b/salt/logstash/conf/conf.enabled.txt.so-helix
@@ -0,0 +1,18 @@
+# This is where you can specify which Logstash configs get loaded.
+#
+# The custom folder on the master gets automatically synced to each logstash
+# node.
+#
+# To enable a custom configuration, see the following example and uncomment it:
+# /usr/share/logstash/pipeline.custom/1234_input_custom.conf
+##
+# All of the defaults are loaded.
+/usr/share/logstash/pipeline.so/0000_input_syslogng.conf
+/usr/share/logstash/pipeline.so/0001_input_json.conf
+/usr/share/logstash/pipeline.so/0002_input_windows_json.conf
+/usr/share/logstash/pipeline.so/0003_input_syslog.conf
+/usr/share/logstash/pipeline.so/0005_input_suricata.conf
+#/usr/share/logstash/pipeline.dynamic/0006_input_beats.conf
+/usr/share/logstash/pipeline.dynamic/0010_input_hhbeats.conf
+/usr/share/logstash/pipeline.so/0007_input_import.conf
+/usr/share/logstash/pipeline.dynamic/9999_output_redis.conf
diff --git a/salt/logstash/files/dynamic/9997_output_helix.conf b/salt/logstash/files/dynamic/9997_output_helix.conf
new file mode 100644
index 000000000..c71b7d241
--- /dev/null
+++ b/salt/logstash/files/dynamic/9997_output_helix.conf
@@ -0,0 +1,112 @@
+{%- if salt['grains.get']('role') == 'so-master' %}
+{% set master = salt['pillar.get']('static:masterip', '') %}
+{%- set nodetype = 'master' %}
+{%- else %}
+{%- set nodetype = salt['pillar.get']('node:node_type', 'storage') %}
+{% set master = salt['pillar.get']('static:masterip', '') %}
+{%- endif %}
+filter {
+  if "fe_clone" in [type] {
+    grok {
+      match => [
+        "source_ip", "^%{IPV4:srcipv4}$",
+        "source_ip", "(?<srcipv6>^([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{1,4}$|((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4})$)"
+      ]
+    }
+    grok {
+      match => [
+        "destination_ip", "(?<dstipv6>^([0-9A-Fa-f]{0,4}:){2,7}([0-9A-Fa-f]{1,4}$|((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\.|$)){4})$)",
+        "destination_ip", "^%{IPV4:dstipv4}$"
+      ]
+    }
+    geoip {
+      source => "[source_ip]"
+      target => "source_geo"
+    }
+    geoip {
+      source => "[destination_ip]"
+      target => "destination_geo"
+
+    }
+    mutate {
+      #rename => { "%{[source_geo][country_code]}" => "srccountrycode" }
+      #rename => { "%{[destination_geo][country_code]}" => "dstcountrycode" }
+      rename => { "syslog-host_from" => "sensor" }
+      rename => { "message" => "rawmsg" }
+      rename => { "event_type" => "program" }
+      copy => { "program" => "class" }
+      rename => { "source_port" => "srcport" }
+      rename => { "destination_port" => "dstport" }
+
+      remove_field => ["source_ip", "destination_ip"]
+      remove_field => ["sensorname", "sensor_name", "service", "source", "tags", "syslog-host"]
+      remove_field => ["sensor_name", "source_ips", "ips", "destination_ips", "syslog-priority", "syslog-file_name", "syslog-facility"]
+    }
+  }
+  if "bro_conn" in [program] {
+    mutate {
+      #add_field => { "metaclass" => "connection" }
+      rename => { "original_bytes" => "sentbytes" }
+      rename => { "respond_bytes" => "rcvdbytes" }
+      rename => { "connection_state" => "connstate" }
+      rename => { "uid" => "connectionid" }
+      rename => { "respond_packets" => "rcvdpackets" }
+      rename => { "original_packets" => "sentpackets" }
+      rename => { "respond_ip_bytes" => "rcvdipbytes" }
+      rename => { "original_ip_bytes" => "sentipbytes" }
+      rename => { "local_respond" => "local_resp" }
+      rename => { "local_orig" => "localorig" }
+      rename => { "missed_bytes" => "missingbytes" }
+    }
+  }
+  if "bro_dns" in [program] {
+    mutate {
+      #add_field => { "metaclass" => "dns" }
+      rename => { "query" => "domain" }
+      rename => { "query_class" => "queryclass" }
+      rename => { "query_class_name" => "queryclassname" }
+      rename => { "query_type" => "querytype" }
+      rename => { "query_type_name" => "querytypename" }
+      rename => { "ra" => "recursionavailable" }
+      rename => { "rd" => "recursiondesired" }
+
+    }
+  }
+  if "bro_dhcp" in [program] {
+    mutate {
+      #add_field => { "metaclass" => "dhcp" }
+      rename => { "ips" => "ip" }
+    }
+  }
+  if "bro_files" in [program] {
+    mutate {
+      #add_field => { "metaclass" => "files" }
+      rename => { "missing_bytes" => "missingbytes" }
+      rename => { "fuid" => "fileid" }
+      rename => { "uid" => "connectionid" }
+    }
+  }
+  if "bro_http" in [program] {
+    mutate {
+      #add_field => { "metaclass" => "http" }
+      rename => { "status_code" => "statuscode" }
+      rename => { "status_message" => "statusmsg" }
+      rename => { "resp_mime_types" => "rcvdmimetype" }
+      rename => { "resp_fuids" => "rcvdfileid" }
+      rename => { "response_body_len" => "rcvdbodybytes" }
+      rename => { "request_body_len" => "sentbodybytes" }
+
+    }
+  }
+}
+output {
+  if "fe_clone" in [type] {
+    http {
+      url => "https://helix-integrations.cloud.aws.apps.fireeye.com/api/upload?source=test&format=json"
+      http_method => "post"
+      http_compression => true
+      headers => ["Authorization", "{{ HELIXAPIKEY }}"]
+      format => "json_batch"
+    }
+  }
+}
diff --git a/setup/so-setup.sh b/setup/so-setup.sh
index 42a1e70c8..ea7e869af 100644
--- a/setup/so-setup.sh
+++ b/setup/so-setup.sh
@@ -153,6 +153,7 @@ if (whiptail_you_sure) ; then
     ## Master ##
     ####################
     if [ $INSTALLTYPE == 'HELIXSESOR']; then
+      whiptail_helix_apikey
       whiptail_homenet_master
       whiptail_rule_setup
       # Get the code if it isn't ET Open
diff --git a/setup/whiptail.sh b/setup/whiptail.sh
index 547fb4a9a..7ad53333e 100644
--- a/setup/whiptail.sh
+++ b/setup/whiptail.sh
@@ -169,6 +169,15 @@ whiptail_eval_adv_warning() {
   whiptail --title "Security Onion Setup" --msgbox "Please keep in mind the more services that you enable the more RAM that is required." 8 75
 }
 
+whiptail_helix_apikey() {
+  HELIXAPIKEY=$(whiptail --title "Security Onion Setup" --inputbox \
+  "Enter your Helix API Key" 10 75 3>&1 1>&2 2>&3)
+
+  local exitstatus=$?
+  whiptail_check_exitstatus
+
+}
+
 whiptail_homenet_master() {
 
   # Ask for the HOME_NET on the master
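
The http output in 9997_output_helix.conf batches events tagged fe_clone and POSTs them to the Helix upload API, with the Authorization header filled in from the {{ HELIXAPIKEY }} value collected during setup. Before restarting Logstash after a change like this, the rendered pipeline file can be parse-checked. This is a minimal sketch, assuming the file ends up under /usr/share/logstash/pipeline.dynamic/ inside a container named so-logstash (the container name and rendered path are assumptions; --config.test_and_exit is Logstash's standard config check flag):

  # Parse-check the rendered output config without starting the pipeline
  docker exec so-logstash /usr/share/logstash/bin/logstash \
    --config.test_and_exit \
    --path.config /usr/share/logstash/pipeline.dynamic/9997_output_helix.conf

Note that the check must run against the rendered file, not the Jinja-templated source in salt/logstash/files/dynamic/, since the {%- ... %} blocks are not valid Logstash syntax until Salt renders them.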
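
whiptail_helix_apikey stores whatever the user types, including an empty string, in HELIXAPIKEY, and that value becomes the Authorization header of the http output above. Below is a minimal sketch of a stricter prompt that re-asks until a non-empty key is supplied; the loop and the warning message are illustrative additions, not part of this patch, while whiptail_check_exitstatus is the existing helper already called in the patched function:

  whiptail_helix_apikey() {
    HELIXAPIKEY=""
    while [ -z "$HELIXAPIKEY" ]; do
      # 3>&1 1>&2 2>&3 swaps stdout/stderr so the typed value can be captured
      HELIXAPIKEY=$(whiptail --title "Security Onion Setup" --inputbox \
      "Enter your Helix API Key" 10 75 3>&1 1>&2 2>&3)

      local exitstatus=$?
      whiptail_check_exitstatus

      if [ -z "$HELIXAPIKEY" ]; then
        whiptail --title "Security Onion Setup" --msgbox "The Helix API Key cannot be blank." 8 75
      fi
    done
  }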