diff --git a/salt/common/init.sls b/salt/common/init.sls index 00d7f35d0..22e36d1d2 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -1,3 +1,4 @@ +{%- set GRAFANA = salt['pillar.get']('master:grafana', 0) %} # Add socore Group socoregroup: group.present: @@ -102,7 +103,7 @@ nginxtmp: # Start the core docker so-core: docker_container.running: - - image: soshybridhunter/so-core:HH1.0.5 + - image: soshybridhunter/so-core:HH1.0.7 - hostname: so-core - user: socore - binds: @@ -113,6 +114,7 @@ so-core: - /opt/so/tmp/nginx/:/run:rw - /etc/pki/masterssl.crt:/etc/pki/nginx/server.crt:ro - /etc/pki/masterssl.key:/etc/pki/nginx/server.key:ro + - /opt/so/conf/fleet/packages:/opt/so/html/packages - cap_add: NET_BIND_SERVICE - port_bindings: - 80:80 @@ -155,7 +157,7 @@ tgrafconf: so-telegraf: docker_container.running: - - image: soshybridhunter/so-telegraf:HH1.0.4 + - image: soshybridhunter/so-telegraf:HH1.0.7 - environment: - HOST_PROC=/host/proc - HOST_ETC=/host/etc @@ -187,7 +189,7 @@ so-telegraf: - /opt/so/conf/telegraf/scripts # If its a master or eval lets install the back end for now -{% if grains['role'] == 'so-master' or grains['role'] == 'so-eval' %} +{% if (grains['role'] == 'so-master' or grains['role'] == 'so-eval') and GRAFANA == 1 %} # Influx DB influxconfdir: @@ -210,7 +212,7 @@ influxdbconf: so-influxdb: docker_container.running: - - image: soshybridhunter/so-influxdb:HH1.0.4 + - image: soshybridhunter/so-influxdb:HH1.0.7 - hostname: influxdb - environment: - INFLUXDB_HTTP_LOG_ENABLED=false @@ -367,7 +369,7 @@ dashboard-{{ SN }}: # Install the docker. 
This needs to be behind nginx at some point so-grafana: docker_container.running: - - image: soshybridhunter/so-grafana:HH1.0.4 + - image: soshybridhunter/so-grafana:HH1.0.7 - hostname: grafana - user: socore - binds: diff --git a/salt/common/nginx/nginx.conf.so-eval b/salt/common/nginx/nginx.conf.so-eval index 50f48497d..3b0a0d4a1 100644 --- a/salt/common/nginx/nginx.conf.so-eval +++ b/salt/common/nginx/nginx.conf.so-eval @@ -136,6 +136,28 @@ http { } + location /thehive/ { + proxy_pass http://{{ masterip }}:9000/thehive/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_http_version 1.1; # this is essential for chunked responses to work + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + + } + + location /soctopus/ { + proxy_pass http://{{ masterip }}:7000/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + + } error_page 404 /404.html; location = /40x.html { } diff --git a/salt/common/nginx/nginx.conf.so-master b/salt/common/nginx/nginx.conf.so-master index 50f48497d..c0eada231 100644 --- a/salt/common/nginx/nginx.conf.so-master +++ b/salt/common/nginx/nginx.conf.so-master @@ -136,6 +136,29 @@ http { } + location /thehive/ { + proxy_pass http://{{ masterip }}:9000/thehive/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_http_version 1.1; # this is essential for chunked responses to work + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + + } + + location /soctopus/ { + proxy_pass http://{{ masterip }}:7000/; + proxy_read_timeout 90; + proxy_connect_timeout 90; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + 
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + + } + error_page 404 /404.html; location = /40x.html { } diff --git a/salt/firewall/init.sls b/salt/firewall/init.sls index 0d5fd174d..7ff009495 100644 --- a/salt/firewall/init.sls +++ b/salt/firewall/init.sls @@ -389,6 +389,17 @@ enable_standard_analyst_3000_{{ip}}: - position: 1 - save: True +enable_standard_analyst_7000_{{ip}}: + iptables.insert: + - table: filter + - chain: DOCKER-USER + - jump: ACCEPT + - proto: tcp + - source: {{ ip }} + - dport: 7000 + - position: 1 + - save: True + enable_standard_analyst_9000_{{ip}}: iptables.insert: - table: filter diff --git a/salt/hive/init.sls b/salt/hive/init.sls index 958ecf540..371e790de 100644 --- a/salt/hive/init.sls +++ b/salt/hive/init.sls @@ -1,3 +1,25 @@ +hiveconfdir: + file.directory: + - name: /opt/so/conf/hive/etc + - makedirs: True + - user: 939 + - group: 939 + +hivelogdir: + file.directory: + - name: /opt/so/log/hive + - makedirs: True + - user: 939 + - group: 939 + +hiveconf: + file.recurse: + - name: /opt/so/conf/hive/etc + - source: salt://hive/thehive/etc + - user: 939 + - group: 939 + - template: jinja + # Install Elasticsearch # Made directory for ES data to live in @@ -5,27 +27,36 @@ hiveesdata: file.directory: - name: /nsm/hive/esdata - makedirs: True + - user: 939 + - group: 939 so-thehive-es: docker_container.running: - - image: docker.elastic.co/elasticsearch/elasticsearch:5.6.0 + - image: soshybridhunter/so-thehive-es:HH1.0.7 - hostname: so-thehive-es - name: so-thehive-es + - user: 939 - interactive: True - tty: True - binds: - /nsm/hive/esdata:/usr/share/elasticsearch/data:rw + - /opt/so/conf/hive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro + - /opt/so/conf/hive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro + - /opt/so/log/hive:/var/log/elasticsearch:rw - environment: - http.host=0.0.0.0 - http.port=9400 - transport.tcp.port=9500 - 
transport.host=0.0.0.0 - - xpack.security.enabled=false - cluster.name=hive - script.inline=true - thread_pool.index.queue_size=100000 - thread_pool.search.queue_size=100000 - thread_pool.bulk.queue_size=100000 + - ES_JAVA_OPTS=-Xms512m -Xmx512m + - port_bindings: + - 0.0.0.0:9400:9400 + - 0.0.0.0:9500:9500 # Install Cortex @@ -34,21 +65,21 @@ so-cortex: - image: thehiveproject/cortex:latest - hostname: so-cortex - name: so-cortex - -# Install Hive -hiveconfdir: - file.directory: - - name: /opt/so/conf/hive/etc - - makedirs: True - -hiveconf: - file.managed: - - name: /opt/so/conf/hive/etc/application.conf - - source: salt://hive/thehive/etc/application.conf - - template: jinja + - port_bindings: + - 0.0.0.0:9001:9001 so-thehive: docker_container.running: - - image: thehiveproject/thehive:latest + - image: soshybridhunter/so-thehive:HH1.0.7 - hostname: so-thehive - name: so-thehive + - user: 939 + - binds: + - /opt/so/conf/hive/etc/application.conf:/opt/thehive/conf/application.conf:ro + - port_bindings: + - 0.0.0.0:9000:9000 + +hivescript: + cmd.script: + - source: salt://hive/thehive/scripts/hive_init.sh + - template: jinja diff --git a/salt/hive/thehive/etc/application.conf b/salt/hive/thehive/etc/application.conf index 5a0a96b9b..1fd4b4816 100644 --- a/salt/hive/thehive/etc/application.conf +++ b/salt/hive/thehive/etc/application.conf @@ -4,6 +4,7 @@ # The secret key is used to secure cryptographic functions. # WARNING: If you deploy your application on several servers, make sure to use the same key. 
play.http.secret.key="letsdewdis" +play.http.context=/thehive/ # Elasticsearch search { diff --git a/salt/hive/thehive/etc/es/elasticsearch.yml b/salt/hive/thehive/etc/es/elasticsearch.yml new file mode 100644 index 000000000..d600830b6 --- /dev/null +++ b/salt/hive/thehive/etc/es/elasticsearch.yml @@ -0,0 +1,17 @@ +cluster.name: "hive" +network.host: 0.0.0.0 +discovery.zen.minimum_master_nodes: 1 +# This is a test -- if this is here, then the volume is mounted correctly. +path.logs: /var/log/elasticsearch +action.destructive_requires_name: true +transport.bind_host: 0.0.0.0 +transport.publish_host: 0.0.0.0 +transport.publish_port: 9500 +http.host: 0.0.0.0 +http.port: 9400 +transport.tcp.port: 9500 +transport.host: 0.0.0.0 +script.inline: true +thread_pool.index.queue_size: 100000 +thread_pool.search.queue_size: 100000 +thread_pool.bulk.queue_size: 100000 diff --git a/salt/hive/thehive/etc/es/log4j2.properties b/salt/hive/thehive/etc/es/log4j2.properties new file mode 100644 index 000000000..85cf5d8fb --- /dev/null +++ b/salt/hive/thehive/etc/es/log4j2.properties @@ -0,0 +1,20 @@ +status = error +#appender.console.type = Console +#appender.console.name = console +#appender.console.layout.type = PatternLayout +#appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n +#rootLogger.level = info +#rootLogger.appenderRef.console.ref = console +# This is a test -- if this here, then the volume is mounted correctly. 
+appender.rolling.type = RollingFile +appender.rolling.name = rolling +appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log +appender.rolling.layout.type = PatternLayout +appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log +appender.rolling.policies.type = Policies +appender.rolling.policies.time.type = TimeBasedTriggeringPolicy +appender.rolling.policies.time.interval = 1 +appender.rolling.policies.time.modulate = true +rootLogger.level = info +rootLogger.appenderRef.rolling.ref = rolling diff --git a/salt/hive/thehive/scripts/hive_init.sh b/salt/hive/thehive/scripts/hive_init.sh new file mode 100755 index 000000000..cb901e36b --- /dev/null +++ b/salt/hive/thehive/scripts/hive_init.sh @@ -0,0 +1,37 @@ +#!/bin/bash +{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %} +{%- set HIVEUSER = salt['pillar.get']('static:hiveuser', '') %} +{%- set HIVEPASSWORD = salt['pillar.get']('static:hivepassword', '') %} +{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %} + +hive_init(){ + sleep 60 + HIVE_IP="{{MASTERIP}}" + HIVE_USER="{{HIVEUSER}}" + HIVE_PASSWORD="{{HIVEPASSWORD}}" + SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf" + + # Migrate DB + curl -v -k -XPOST "https://$HIVE_IP:/thehive/api/maintenance/migrate" + + # Generate unique ID for apikey + HIVE_KEY="{{HIVEKEY}}" + + # Create intial TheHive user + curl -v -k "https://$HIVE_IP/thehive/api/user" -H "Content-Type: application/json" -d "{\"login\" : \"$HIVE_USER\",\"name\" : \"$HIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$HIVE_PASSWORD\", \"key\": \"$HIVE_KEY\"}" + + # Update SOCtopus config with apikey value + #sed -i "s/hive_key = .*/hive_key = $HIVE_KEY/" $SOCTOPUS_CONFIG + + # Check for correct authentication + #curl 
-v -k -H "Authorization: Bearer $HIVE_KEY" "https://$HIVE_IP/thehive/api/user/$USER" + + touch /opt/so/state/thehive.txt + +} + +if [ -f /opt/so/state/thehive.txt ]; then + exit 0 +else + hive_init +fi diff --git a/salt/kibana/init.sls b/salt/kibana/init.sls index 3b5037336..050582c82 100644 --- a/salt/kibana/init.sls +++ b/salt/kibana/init.sls @@ -57,7 +57,7 @@ synckibanacustom: # Start the kibana docker so-kibana: docker_container.running: - - image: soshybridhunter/so-kibana:HH1.0.6 + - image: soshybridhunter/so-kibana:HH1.0.7 - hostname: kibana - user: kibana - environment: diff --git a/salt/logstash/conf/conf.enabled.txt.so-eval b/salt/logstash/conf/conf.enabled.txt.so-eval index 5c85ab8ac..d8eae00e6 100644 --- a/salt/logstash/conf/conf.enabled.txt.so-eval +++ b/salt/logstash/conf/conf.enabled.txt.so-eval @@ -7,25 +7,25 @@ # /usr/share/logstash/pipeline.custom/1234_input_custom.conf ## # All of the defaults are loaded. -/usr/share/logstash/pipeline.so/0000_input_syslogng.conf -/usr/share/logstash/pipeline.so/0001_input_json.conf -/usr/share/logstash/pipeline.so/0002_input_windows_json.conf -/usr/share/logstash/pipeline.so/0003_input_syslog.conf -/usr/share/logstash/pipeline.so/0005_input_suricata.conf +#/usr/share/logstash/pipeline.so/0000_input_syslogng.conf +#/usr/share/logstash/pipeline.so/0001_input_json.conf +#/usr/share/logstash/pipeline.so/0002_input_windows_json.conf +#/usr/share/logstash/pipeline.so/0003_input_syslog.conf +#/usr/share/logstash/pipeline.so/0005_input_suricata.conf /usr/share/logstash/pipeline.dynamic/0006_input_beats.conf /usr/share/logstash/pipeline.so/0007_input_import.conf -/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf -/usr/share/logstash/pipeline.so/1001_preprocess_syslogng.conf -/usr/share/logstash/pipeline.so/1002_preprocess_json.conf +#/usr/share/logstash/pipeline.so/1000_preprocess_log_elapsed.conf +#/usr/share/logstash/pipeline.so/1001_preprocess_syslogng.conf 
+#/usr/share/logstash/pipeline.so/1002_preprocess_json.conf /usr/share/logstash/pipeline.so/1003_preprocess_bro.conf -/usr/share/logstash/pipeline.so/1004_preprocess_syslog_types.conf +#/usr/share/logstash/pipeline.so/1004_preprocess_syslog_types.conf /usr/share/logstash/pipeline.so/1026_preprocess_dhcp.conf -/usr/share/logstash/pipeline.so/1029_preprocess_esxi.conf -/usr/share/logstash/pipeline.so/1030_preprocess_greensql.conf -/usr/share/logstash/pipeline.so/1031_preprocess_iis.conf -/usr/share/logstash/pipeline.so/1032_preprocess_mcafee.conf +#/usr/share/logstash/pipeline.so/1029_preprocess_esxi.conf +#/usr/share/logstash/pipeline.so/1030_preprocess_greensql.conf +#/usr/share/logstash/pipeline.so/1031_preprocess_iis.conf +#/usr/share/logstash/pipeline.so/1032_preprocess_mcafee.conf /usr/share/logstash/pipeline.so/1033_preprocess_snort.conf -/usr/share/logstash/pipeline.so/1034_preprocess_syslog.conf +#/usr/share/logstash/pipeline.so/1034_preprocess_syslog.conf /usr/share/logstash/pipeline.so/1100_preprocess_bro_conn.conf /usr/share/logstash/pipeline.so/1101_preprocess_bro_dhcp.conf /usr/share/logstash/pipeline.so/1102_preprocess_bro_dns.conf @@ -60,17 +60,17 @@ /usr/share/logstash/pipeline.so/1132_preprocess_bro_smb_mapping.conf /usr/share/logstash/pipeline.so/1133_preprocess_bro_ntlm.conf /usr/share/logstash/pipeline.so/1134_preprocess_bro_dce_rpc.conf -/usr/share/logstash/pipeline.so/1998_test_data.conf -/usr/share/logstash/pipeline.so/2000_network_flow.conf +#/usr/share/logstash/pipeline.so/1998_test_data.conf +#/usr/share/logstash/pipeline.so/2000_network_flow.conf /usr/share/logstash/pipeline.so/6000_bro.conf /usr/share/logstash/pipeline.so/6001_bro_import.conf -/usr/share/logstash/pipeline.so/6002_syslog.conf -/usr/share/logstash/pipeline.so/6101_switch_brocade.conf -/usr/share/logstash/pipeline.so/6200_firewall_fortinet.conf -/usr/share/logstash/pipeline.so/6201_firewall_pfsense.conf -/usr/share/logstash/pipeline.so/6300_windows.conf 
-/usr/share/logstash/pipeline.so/6301_dns_windows.conf -/usr/share/logstash/pipeline.so/6400_suricata.conf +#/usr/share/logstash/pipeline.so/6002_syslog.conf +#/usr/share/logstash/pipeline.so/6101_switch_brocade.conf +#/usr/share/logstash/pipeline.so/6200_firewall_fortinet.conf +#/usr/share/logstash/pipeline.so/6201_firewall_pfsense.conf +#/usr/share/logstash/pipeline.so/6300_windows.conf +#/usr/share/logstash/pipeline.so/6301_dns_windows.conf +#/usr/share/logstash/pipeline.so/6400_suricata.conf /usr/share/logstash/pipeline.so/6500_ossec.conf /usr/share/logstash/pipeline.so/6501_ossec_sysmon.conf /usr/share/logstash/pipeline.so/6502_ossec_autoruns.conf @@ -87,25 +87,25 @@ #/usr/share/logstash/pipeline.so/8503_postprocess_freq_analysis_bro_http.conf #/usr/share/logstash/pipeline.so/8504_postprocess_freq_analysis_bro_ssl.conf #/usr/share/logstash/pipeline.so/8505_postprocess_freq_analysis_bro_x509.conf -/usr/share/logstash/pipeline.so/8998_postprocess_log_elapsed.conf +#/usr/share/logstash/pipeline.so/8998_postprocess_log_elapsed.conf /usr/share/logstash/pipeline.so/8999_postprocess_rename_type.conf /usr/share/logstash/pipeline.dynamic/9000_output_bro.conf -/usr/share/logstash/pipeline.dynamic/9001_output_switch.conf +#/usr/share/logstash/pipeline.dynamic/9001_output_switch.conf /usr/share/logstash/pipeline.dynamic/9002_output_import.conf -/usr/share/logstash/pipeline.dynamic/9004_output_flow.conf -/usr/share/logstash/pipeline.dynamic/9026_output_dhcp.conf -/usr/share/logstash/pipeline.dynamic/9029_output_esxi.conf -/usr/share/logstash/pipeline.dynamic/9030_output_greensql.conf -/usr/share/logstash/pipeline.dynamic/9031_output_iis.conf -/usr/share/logstash/pipeline.dynamic/9032_output_mcafee.conf +#/usr/share/logstash/pipeline.dynamic/9004_output_flow.conf +#/usr/share/logstash/pipeline.dynamic/9026_output_dhcp.conf +#/usr/share/logstash/pipeline.dynamic/9029_output_esxi.conf +#/usr/share/logstash/pipeline.dynamic/9030_output_greensql.conf 
+#/usr/share/logstash/pipeline.dynamic/9031_output_iis.conf +#/usr/share/logstash/pipeline.dynamic/9032_output_mcafee.conf /usr/share/logstash/pipeline.dynamic/9033_output_snort.conf -/usr/share/logstash/pipeline.dynamic/9034_output_syslog.conf -/usr/share/logstash/pipeline.dynamic/9200_output_firewall.conf -/usr/share/logstash/pipeline.dynamic/9300_output_windows.conf -/usr/share/logstash/pipeline.dynamic/9301_output_dns_windows.conf +#/usr/share/logstash/pipeline.dynamic/9034_output_syslog.conf +#/usr/share/logstash/pipeline.dynamic/9200_output_firewall.conf +#/usr/share/logstash/pipeline.dynamic/9300_output_windows.conf +#/usr/share/logstash/pipeline.dynamic/9301_output_dns_windows.conf /usr/share/logstash/pipeline.dynamic/9400_output_suricata.conf /usr/share/logstash/pipeline.dynamic/9500_output_beats.conf /usr/share/logstash/pipeline.dynamic/9600_output_ossec.conf -/usr/share/logstash/pipeline.dynamic/9998_output_test_data.conf +#/usr/share/logstash/pipeline.dynamic/9998_output_test_data.conf /usr/share/logstash/pipeline.dynamic/7100_osquery_wel.conf /usr/share/logstash/pipeline.dynamic/9100_output_osquery.conf diff --git a/salt/master/init.sls b/salt/master/init.sls index 35f6c5254..8f20ef69f 100644 --- a/salt/master/init.sls +++ b/salt/master/init.sls @@ -49,7 +49,7 @@ acngcopyconf: # Install the apt-cacher-ng container so-aptcacherng: docker_container.running: - - image: soshybridhunter/so-acng:HH1.0.5 + - image: soshybridhunter/so-acng:HH1.0.7 - hostname: so-acng - port_bindings: - 0.0.0.0:3142:3142 diff --git a/salt/mysql/init.sls b/salt/mysql/init.sls index af80030ee..b1e875578 100644 --- a/salt/mysql/init.sls +++ b/salt/mysql/init.sls @@ -50,7 +50,7 @@ mysqldatadir: so-mysql: docker_container.running: - - image: soshybridhunter/so-mysql:HH1.0.5 + - image: soshybridhunter/so-mysql:HH1.0.7 - hostname: so-mysql - user: socore - port_bindings: diff --git a/salt/redis/init.sls b/salt/redis/init.sls index cd982a137..6dfbb473d 100644 --- a/salt/redis/init.sls 
+++ b/salt/redis/init.sls @@ -49,7 +49,7 @@ toosmooth/so-redis:test2: so-redis: docker_container.running: - - image: soshybridhunter/so-redis:HH1.0.5 + - image: soshybridhunter/so-redis:HH1.0.7 - hostname: so-redis - user: socore - port_bindings: diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf new file mode 100644 index 000000000..1a48ad92f --- /dev/null +++ b/salt/soctopus/files/SOCtopus.conf @@ -0,0 +1,49 @@ +{%- set ip = salt['pillar.get']('static:masterip', '') %} +{%- set HIVEKEY = salt['pillar.get']('static:hivekey', '') %} + +[es] +es_url = http://{{ip}}:9200 + +[fir] +fir_url = YOURFIRURL +fir_token = YOURFIRTOKEN +fir_actor = 3 +fir_category = 3 +fir_confidentiality = 1 +fir_detection = 2 +fir_plan = 8 +fir_severity = 4 + +[grr] +grr_url = YOURGRRURL +grr_user = YOURGRRUSER +grr_pass = YOURGRRPASS + +[hive] +hive_url = https://{{ip}}/thehive/ +hive_key = {{ HIVEKEY }} +hive_tlp = 3 +hive_verifycert = False + +[misp] +misp_url = YOURMISPURL +misp_key = YOURMISPKEY +misp_verifycert = False +distrib = 0 +threat = 4 +analysis = 0 + +[rtir] +rtir_url = YOURRTIRURL +rtir_api = REST/1.0/ +rtir_user = YOURRTIRUSER +rtir_pass = YOURRTIRPASS +rtir_queue = Incidents +rtir_creator = root + +[slack] +slack_url = YOURSLACKWORKSPACE +slack_webhook = YOURSLACKWEBHOOK + +[log] +logfile = /tmp/soctopus.log diff --git a/salt/soctopus/init.sls b/salt/soctopus/init.sls new file mode 100644 index 000000000..3902124e9 --- /dev/null +++ b/salt/soctopus/init.sls @@ -0,0 +1,24 @@ +soctopusdir: + file.directory: + - name: /opt/so/conf/soctopus + - user: 939 + - group: 939 + - makedirs: True + +soctopussync: + file.recurse: + - name: /opt/so/conf/soctopus + - source: salt://soctopus/files + - user: 939 + - group: 939 + - template: jinja + +so-soctopus: + docker_container.running: + - image: soshybridhunter/so-soctopus:HH1.0.7 + - hostname: soctopus + - name: so-soctopus + - binds: + - /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro + 
- port_bindings: - 0.0.0.0:7000:7000 diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index ea29c69a0..48106a83a 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -72,7 +72,7 @@ suriconfigsync: so-suricata: docker_container.running: - - image: soshybridhunter/so-suricata:HH1.0.6 + - image: soshybridhunter/so-suricata:HH1.0.7 - privileged: True - environment: - INTERFACE={{ interface }} diff --git a/salt/top.sls b/salt/top.sls index fcd68f1eb..2a34c7548 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -1,4 +1,6 @@ {%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %} +{%- set OSQUERY = salt['pillar.get']('master:osquery', 0) %} +{%- set WAZUH = salt['pillar.get']('master:wazuh', 0) %} base: 'G@role:so-sensor': - ca @@ -21,7 +23,9 @@ base: - firewall - master - idstools + {%- if OSQUERY != 0 %} - mysql + {%- endif %} - elasticsearch - logstash - kibana @@ -30,12 +34,17 @@ base: - bro - curator - elastalert - - redis + {%- if OSQUERY != 0 %} - fleet + - redis + {%- endif %} + {%- if WAZUH != 0 %} - wazuh + {%- endif %} - filebeat - utility - schedule + - soctopus 'G@role:so-master': @@ -56,6 +65,7 @@ base: - utility - schedule - fleet + - soctopus # Storage node logic diff --git a/salt/wazuh/init.sls b/salt/wazuh/init.sls index ac05f1984..4e5c136b5 100644 --- a/salt/wazuh/init.sls +++ b/salt/wazuh/init.sls @@ -58,7 +58,7 @@ wazuhagentregister: so-wazuh: docker_container.running: - - image: soshybridhunter/so-wazuh:HH1.0.5 + - image: soshybridhunter/so-wazuh:HH1.0.7 - hostname: {{HOSTNAME}}-wazuh-manager - name: so-wazuh - detach: True diff --git a/so-setup-network.sh b/so-setup-network.sh index e0a2899de..dde654eb3 100644 --- a/so-setup-network.sh +++ b/so-setup-network.sh @@ -410,6 +410,12 @@ es_heapsize() { } +eval_mode_hostsfile() { + + echo "127.0.0.1 $HOSTNAME" >> /etc/hosts + +} + filter_nics() { # Filter the NICs that we don't want to see in setup @@ -421,6 +427,7 @@ generate_passwords(){ # Generate Random
Passwords for Things MYSQLPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) FLEETPASS=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) + HIVEKEY=$(cat /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w 20 | head -n 1) } get_filesystem_nsm(){ @@ -528,6 +535,28 @@ master_pillar() { echo " ls_input_threads: 1" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls echo " ls_batch_count: 125" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls echo " mtu: 1500" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + if [ $EVALADVANCED == 'ADVANCED' ]; then + if [ $EVALGRAFANA == '0' ]; then + echo " grafana: 1" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + else + echo " grafana: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + fi + if [ $EVALOSQUERY == '0' ]; then + echo " osquery: 1" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + else + echo " osquery: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + fi + if [ $EVALWAZUH == '0' ]; then + echo " wazuh: 1" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + else + echo " wazuh: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + fi + else + echo " grafana: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + echo " osquery: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + echo " wazuh: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls + fi + else echo " freq: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls echo " domainstats: 0" >> /opt/so/saltstack/pillar/masters/$HOSTNAME.sls @@ -560,6 +589,9 @@ master_static() { echo " broversion: $BROVERSION" >> /opt/so/saltstack/pillar/static.sls echo " ids: $NIDS" >> /opt/so/saltstack/pillar/static.sls echo " masterip: $MAINIP" >> /opt/so/saltstack/pillar/static.sls + echo " hiveuser: hiveadmin" >> /opt/so/saltstack/pillar/static.sls + echo " hivepassword: hivechangeme" >> /opt/so/saltstack/pillar/static.sls + echo " hivekey: $HIVEKEY" >> /opt/so/saltstack/pillar/static.sls if [[ $MASTERUPDATES == 'MASTER' ]]; then echo 
" masterupdate: 1" >> /opt/so/saltstack/pillar/static.sls else @@ -1066,6 +1098,11 @@ whiptail_bond_nics() { BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 78 12 ${FNICS[@]} 3>&1 1>&2 2>&3 ) + while [ -z "$BNICS" ] + do + BNICS=$(whiptail --title "NIC Setup" --checklist "Please add NICs to the Monitor Interface" 20 78 12 ${FNICS[@]} 3>&1 1>&2 2>&3 ) + done + local exitstatus=$? whiptail_check_exitstatus $exitstatus @@ -1093,7 +1130,7 @@ whiptail_cancel() { whiptail_check_exitstatus() { if [ $1 == '1' ]; then - echo " They hit cancel" + echo "They hit cancel" whiptail_cancel fi @@ -1109,6 +1146,35 @@ whiptail_cur_close_days() { } +whiptail_eval_adv() { + EVALADVANCED=$(whiptail --title "Security Onion Setup" --radiolist \ + "Choose your eval install:" 20 78 4 \ + "BASIC" "Install basic components for evaluation" ON \ + "ADVANCED" "Choose additional components to be installed" OFF 3>&1 1>&2 2>&3 ) +} + +whiptail_eval_adv_service_grafana() { + whiptail --title "Eval Advanced Setup" --yesno "Would you like to enable Grafana for detailed monitoring?" 8 78 + local exitstatus=$? + EVALGRAFANA=$exitstatus +} + +whiptail_eval_adv_service_osquery() { + whiptail --title "Eval Advanced Setup" --yesno "Would you like to enable OSquery for client monitoring?" 8 78 + local exitstatus=$? + EVALOSQUERY=$exitstatus +} + +whiptail_eval_adv_service_wazuh() { + whiptail --title "Eval Advanced Setup" --yesno "Would you like to enable Wazuh for client monitoring?" 8 78 + local exitstatus=$? + EVALWAZUH=$exitstatus +} + +whiptail_eval_adv_warning() { + whiptail --title "Security Onion Setup" --msgbox "Please keep in mind the more services that you enable the more RAM that is required." 
8 78 +} + whiptail_homenet_master() { # Ask for the HOME_NET on the master @@ -1172,6 +1238,11 @@ whiptail_management_nic() { MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 78 12 ${NICS[@]} 3>&1 1>&2 2>&3 ) + while [ -z "$MNIC" ] + do + MNIC=$(whiptail --title "NIC Setup" --radiolist "Please select your management NIC" 20 78 12 ${NICS[@]} 3>&1 1>&2 2>&3 ) + done + local exitstatus=$? whiptail_check_exitstatus $exitstatus @@ -1686,11 +1757,20 @@ if (whiptail_you_sure); then # Snag the HOME_NET whiptail_homenet_master + # Ask about advanced mode + whiptail_eval_adv + if [ $EVALADVANCED == 'ADVANCED' ]; then + whiptail_eval_adv_warning + whiptail_eval_adv_service_grafana + whiptail_eval_adv_service_osquery + whiptail_eval_adv_service_wazuh + fi + # Set a bunch of stuff since this is eval es_heapsize ls_heapsize - NODE_ES_HEAP_SIZE=$ES_HEAP_SIZE - NODE_LS_HEAP_SIZE=$LS_HEAP_SIZE + NODE_ES_HEAP_SIZE="600m" + NODE_LS_HEAP_SIZE="2000m" LSPIPELINEWORKERS=1 LSPIPELINEBATCH=125 LSINPUTTHREADS=1 @@ -1701,6 +1781,7 @@ if (whiptail_you_sure); then BROVERSION=ZEEK CURCLOSEDAYS=30 whiptail_make_changes + #eval_mode_hostsfile generate_passwords auth_pillar clear_master