mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2026-04-01 02:13:57 +02:00
Merge branch 'dev' of https://github.com/Security-Onion-Solutions/securityonion-saltstack into dev
This commit is contained in:
@@ -1,2 +0,0 @@
|
||||
salt/bro/files/local.bro
|
||||
salt/bro/files/local.bro.community
|
||||
|
||||
@@ -57,3 +57,7 @@ pillar_roots:
|
||||
peer:
|
||||
.*:
|
||||
- x509.sign_remote_certificate
|
||||
|
||||
reactor:
|
||||
- 'so/fleet':
|
||||
- salt://reactor/fleet.sls
|
||||
|
||||
@@ -17,10 +17,9 @@ eval:
|
||||
- so-grafana
|
||||
{% endif %}
|
||||
- so-dockerregistry
|
||||
- so-sensoroni
|
||||
- so-soc
|
||||
- so-kratos
|
||||
- so-idstools
|
||||
- so-auth-api
|
||||
- so-auth-ui
|
||||
{% if OSQUERY != '0' %}
|
||||
- so-mysql
|
||||
- so-fleet
|
||||
@@ -89,12 +88,11 @@ master_search:
|
||||
containers:
|
||||
- so-core
|
||||
- so-telegraf
|
||||
- so-sensoroni
|
||||
- so-soc
|
||||
- so-kratos
|
||||
- so-acng
|
||||
- so-idstools
|
||||
- so-redis
|
||||
- so-auth-api
|
||||
- so-auth-ui
|
||||
- so-logstash
|
||||
- so-elasticsearch
|
||||
- so-curator
|
||||
@@ -135,12 +133,11 @@ master:
|
||||
- so-influxdb
|
||||
- so-grafana
|
||||
{% endif %}
|
||||
- so-sensoroni
|
||||
- so-soc
|
||||
- so-kratos
|
||||
- so-acng
|
||||
- so-idstools
|
||||
- so-redis
|
||||
- so-auth-api
|
||||
- so-auth-ui
|
||||
- so-elasticsearch
|
||||
- so-logstash
|
||||
- so-kibana
|
||||
|
||||
3
pillar/firewall/fleet_nodes.sls
Normal file
3
pillar/firewall/fleet_nodes.sls
Normal file
@@ -0,0 +1,3 @@
|
||||
fleet_nodes:
|
||||
- 127.0.0.1
|
||||
|
||||
5
pillar/healthcheck/eval.sls
Normal file
5
pillar/healthcheck/eval.sls
Normal file
@@ -0,0 +1,5 @@
|
||||
healthcheck:
|
||||
enabled: False
|
||||
schedule: 300
|
||||
checks:
|
||||
- zeek
|
||||
5
pillar/healthcheck/sensor.sls
Normal file
5
pillar/healthcheck/sensor.sls
Normal file
@@ -0,0 +1,5 @@
|
||||
healthcheck:
|
||||
enabled: False
|
||||
schedule: 300
|
||||
checks:
|
||||
- zeek
|
||||
@@ -2,59 +2,15 @@ logstash:
|
||||
pipelines:
|
||||
search:
|
||||
config:
|
||||
- so/1000_preprocess_log_elapsed.conf
|
||||
- so/1001_preprocess_syslogng.conf
|
||||
- so/1002_preprocess_json.conf
|
||||
- so/1004_preprocess_syslog_types.conf
|
||||
- so/1026_preprocess_dhcp.conf
|
||||
- so/1029_preprocess_esxi.conf
|
||||
- so/1030_preprocess_greensql.conf
|
||||
- so/1031_preprocess_iis.conf
|
||||
- so/1032_preprocess_mcafee.conf
|
||||
- so/1033_preprocess_snort.conf
|
||||
- so/1034_preprocess_syslog.conf
|
||||
- so/2000_network_flow.conf
|
||||
- so/6002_syslog.conf
|
||||
- so/6101_switch_brocade.conf
|
||||
- so/6200_firewall_fortinet.conf
|
||||
- so/6201_firewall_pfsense.conf
|
||||
- so/6300_windows.conf
|
||||
- so/6301_dns_windows.conf
|
||||
- so/6400_suricata.conf
|
||||
- so/6500_ossec.conf
|
||||
- so/6501_ossec_sysmon.conf
|
||||
- so/6502_ossec_autoruns.conf
|
||||
- so/6600_winlogbeat_sysmon.conf
|
||||
- so/6700_winlogbeat.conf
|
||||
- so/7100_osquery_wel.conf
|
||||
- so/7200_strelka.conf
|
||||
- so/8001_postprocess_common_ip_augmentation.conf
|
||||
- so/8007_postprocess_http.conf
|
||||
- so/8200_postprocess_tagging.conf
|
||||
- so/8998_postprocess_log_elapsed.conf
|
||||
- so/8999_postprocess_rename_type.conf
|
||||
- so/0900_input_redis.conf.jinja
|
||||
- so/9000_output_bro.conf.jinja
|
||||
- so/9001_output_switch.conf.jinja
|
||||
- so/9000_output_zeek.conf.jinja
|
||||
- so/9002_output_import.conf.jinja
|
||||
- so/9004_output_flow.conf.jinja
|
||||
- so/9026_output_dhcp.conf.jinja
|
||||
- so/9029_output_esxi.conf.jinja
|
||||
- so/9030_output_greensql.conf.jinja
|
||||
- so/9031_output_iis.conf.jinja
|
||||
- so/9032_output_mcafee.conf.jinja
|
||||
- so/9033_output_snort.conf.jinja
|
||||
- so/9034_output_syslog.conf.jinja
|
||||
- so/9100_output_osquery.conf.jinja
|
||||
- so/9200_output_firewall.conf.jinja
|
||||
- so/9300_output_windows.conf.jinja
|
||||
- so/9301_output_dns_windows.conf.jinja
|
||||
- so/9400_output_suricata.conf.jinja
|
||||
- so/9500_output_beats.conf.jinja
|
||||
- so/9600_output_ossec.conf.jinja
|
||||
- so/9700_output_strelka.conf.jinja
|
||||
templates:
|
||||
- so/beats-template.json
|
||||
- so/logstash-ossec-template.json
|
||||
- so/logstash-strelka-template.json
|
||||
- so/logstash-template.json
|
||||
- so/so-beats-template.json
|
||||
- so/so-common-template.json
|
||||
- so/so-zeek-template.json
|
||||
|
||||
@@ -13,6 +13,7 @@ base:
|
||||
- static
|
||||
- firewall.*
|
||||
- brologs
|
||||
- healthcheck.sensor
|
||||
- minions.{{ grains.id }}
|
||||
|
||||
'*_master or *_mastersearch':
|
||||
@@ -20,7 +21,7 @@ base:
|
||||
- static
|
||||
- firewall.*
|
||||
- data.*
|
||||
- auth
|
||||
- secrets
|
||||
- minions.{{ grains.id }}
|
||||
|
||||
'*_master':
|
||||
@@ -32,9 +33,10 @@ base:
|
||||
- firewall.*
|
||||
- data.*
|
||||
- brologs
|
||||
- auth
|
||||
- secrets
|
||||
- logstash
|
||||
- logstash.eval
|
||||
- healthcheck.eval
|
||||
- minions.{{ grains.id }}
|
||||
|
||||
'*_node':
|
||||
@@ -56,3 +58,10 @@ base:
|
||||
- logstash
|
||||
- logstash.helix
|
||||
- minions.{{ grains.id }}
|
||||
|
||||
'*_fleet':
|
||||
- static
|
||||
- firewall.*
|
||||
- data.*
|
||||
- secrets
|
||||
- minions.{{ grains.id }}
|
||||
|
||||
33
salt/_beacons/zeek.py
Normal file
33
salt/_beacons/zeek.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import logging
|
||||
|
||||
|
||||
def status():
    """Return the output of ``zeekctl status`` run inside the so-zeek container."""

    cmd = "runuser -l zeek -c '/opt/zeek/bin/zeekctl status'"
    retval = __salt__['docker.run']('so-zeek', cmd)
    # Lazy %-style logging args: the message is only formatted if INFO is enabled.
    logging.info('zeekctl_module: zeekctl.status retval: %s', retval)

    return retval
||||
|
||||
def beacon(config):
    """Salt beacon: when healthcheck is enabled, report whether Zeek needs a restart.

    Scans the lowercased ``zeekctl status`` output for failure keywords and
    emits a ``zeek_restart`` boolean both as a beacon event and as a telegraf
    metric.

    :param config: beacon configuration (unused here).
    :return: list of event dicts, e.g. ``[{'zeek_restart': False}]``; empty
        when healthcheck is disabled.
    """

    retval = []

    is_enabled = __salt__['healthcheck.is_enabled']()
    logging.info('zeek_beacon: healthcheck_is_enabled: %s', is_enabled)

    if is_enabled:
        zeekstatus = status().lower().split(' ')
        logging.info('zeek_beacon: zeekctl.status: %s', zeekstatus)
        # Any of these tokens in the status output means at least one Zeek
        # process is unhealthy and a restart is warranted.
        failure_tokens = {'stopped', 'crashed', 'error', 'error:'}
        zeek_restart = bool(failure_tokens.intersection(zeekstatus))

        __salt__['telegraf.send']('healthcheck zeek_restart=%s' % str(zeek_restart))
        retval.append({'zeek_restart': zeek_restart})
        logging.info('zeek_beacon: retval: %s', retval)

    return retval
95
salt/_modules/healthcheck.py
Normal file
95
salt/_modules/healthcheck.py
Normal file
@@ -0,0 +1,95 @@
|
||||
#!py
|
||||
|
||||
import logging
|
||||
import sys
|
||||
|
||||
allowed_functions = ['is_enabled', 'zeek']
|
||||
states_to_apply = []
|
||||
|
||||
|
||||
def apply_states(states=''):
    """Apply the given Salt states, or any queued in ``states_to_apply``.

    :param states: optional comma-separated state names; when empty, falls
        back to the module-level ``states_to_apply`` queue. No-op when both
        are empty.
    """

    # sys._getframe walks one frame up to log which function asked for the apply.
    calling_func = sys._getframe().f_back.f_code.co_name
    logging.debug('healthcheck_module: apply_states function caller: %s', calling_func)

    if not states:
        states = ','.join(states_to_apply)

    if states:
        logging.info('healthcheck_module: apply_states states: %s', states)
        __salt__['state.apply'](states)
|
||||
def docker_stop(container):
    """Stop and remove *container*, logging (not raising) any failure.

    Bug fix: the original passed the literal string ``'stop=True'`` as a
    positional argument; ``docker.rm`` expects a ``stop`` keyword, so pass
    it as one so the container is actually stopped before removal.
    """

    try:
        __salt__['docker.rm'](container, stop=True)
    except Exception as e:
        # Best-effort: a failed stop should not abort the healthcheck run.
        logging.error('healthcheck_module: %s', e)
|
||||
def is_enabled():
    """Return True when the ``healthcheck:enabled`` pillar value is truthy.

    Bug fix: the original default was the *string* ``'False'``, which is
    truthy in Python, so a minion with no healthcheck pillar incorrectly
    reported healthcheck as enabled. Default to the boolean ``False``.
    """

    return bool(__salt__['pillar.get']('healthcheck:enabled', False))
||||
|
||||
def run(checks=''):
    """Run the requested healthcheck functions from this module.

    :param checks: optional comma-separated check names; defaults to the
        ``healthcheck:checks`` pillar list.
    :return: list of the check names that were actually executed.
    """

    retval = []
    calling_func = sys._getframe().f_back.f_code.co_name
    logging.debug('healthcheck_module: run function caller: %s', calling_func)

    if checks:
        checks = checks.split(',')
    else:
        checks = __salt__['pillar.get']('healthcheck:checks', {})

    logging.debug('healthcheck_module: run checks to be run: %s', checks)
    for check in checks:
        # Whitelist guard: never getattr() arbitrary (pillar- or CLI-supplied) names.
        if check in allowed_functions:
            retval.append(check)
            getattr(sys.modules[__name__], check)()
        else:
            logging.warning('healthcheck_module: attempted to run function %s', check)

    # If you want to apply states at the end of the run,
    # be sure to append the state name to states_to_apply[]
    apply_states()

    return retval
||||
|
||||
def send_event(tag, eventdata):
    """Fire a Salt event with *tag*, carrying the first element of *eventdata*."""
    payload = eventdata[0]
    __salt__['event.send'](tag, payload)
|
||||
def zeek():
    """Healthcheck for Zeek: detect an unhealthy instance and arrange a restart.

    Uses the ``zeekctl.status`` retcode (non-zero == unhealthy) as the health
    signal. When called from the beacon, the restart is deferred to the
    reactor; any other caller stops the container and queues the ``zeek``
    state for re-apply. The result is fired as a Salt event and sent to
    telegraf.

    :return: list with a single ``{'zeek_restart': bool}`` dict.
    """

    calling_func = sys._getframe().f_back.f_code.co_name
    logging.debug('healthcheck_module: zeek function caller: %s', calling_func)
    retval = []

    retcode = __salt__['zeekctl.status'](verbose=False)
    logging.debug('healthcheck_module: zeekctl.status retcode: %i', retcode)
    if retcode:
        zeek_restart = True
        # The beacon path lets the reactor drive the restart; everyone else
        # stops the container here and queues the zeek state.
        if calling_func != 'beacon':
            docker_stop('so-zeek')
            states_to_apply.append('zeek')
    else:
        zeek_restart = False

    # 'execute' means we were invoked directly (e.g. salt CLI): apply now.
    if calling_func == 'execute' and zeek_restart:
        apply_states()

    retval.append({'zeek_restart': zeek_restart})

    send_event('so/healthcheck/zeek', retval)
    __salt__['telegraf.send']('healthcheck zeek_restart=%s' % str(zeek_restart))
    return retval
||||
16
salt/_modules/telegraf.py
Normal file
16
salt/_modules/telegraf.py
Normal file
@@ -0,0 +1,16 @@
|
||||
#!py
|
||||
|
||||
import logging
|
||||
import socket
|
||||
|
||||
|
||||
def send(data):
    """Send *data* to the telegraf socket_listener on this node's main IP via UDP.

    :param data: influx line-protocol string, e.g. ``'healthcheck zeek_restart=True'``.
    :return: number of bytes sent.
    """

    mainint = __salt__['pillar.get']('node:mainint')
    # First address bound to the pillar-configured main interface.
    mainip = __salt__['grains.get']('ip_interfaces').get(mainint)[0]
    dstport = 8094

    # Context manager closes the socket -- the original leaked one per call.
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sent = sock.sendto(data.encode('utf-8'), (mainip, dstport))

    return sent
||||
160
salt/_modules/zeekctl.py
Normal file
160
salt/_modules/zeekctl.py
Normal file
@@ -0,0 +1,160 @@
|
||||
#!py

import logging


def _zeekctl(subcommand):
    """Run ``zeekctl <subcommand>`` as the zeek user inside the so-zeek
    container and return the command output.

    Extracted helper: every public function below previously repeated the
    same runuser/docker.run boilerplate.
    """
    cmd = "runuser -l zeek -c '/opt/zeek/bin/zeekctl %s'" % subcommand
    return __salt__['docker.run']('so-zeek', cmd)


def capstats(interval=10):
    """Report capture statistics averaged over *interval* seconds."""
    return _zeekctl('capstats %i' % interval)


def check():
    """Validate the Zeek configuration without installing it."""
    return _zeekctl('check')


def cleanup(all=''):
    """Clean up Zeek spool directories.

    :param all: pass the literal string ``'all'`` for ``zeekctl cleanup --all``;
        any other non-empty value is rejected. (Name kept for Salt CLI
        compatibility even though it shadows the builtin.)
    """
    if all:
        if all != 'all':
            return 'Invalid option. zeekctl.help for options'
        return _zeekctl('cleanup --all')
    return _zeekctl('cleanup')


def config():
    """Print the zeekctl configuration."""
    return _zeekctl('config')


def deploy():
    """Check, install and restart Zeek (``zeekctl deploy``)."""
    return _zeekctl('deploy')


def df():
    """Report disk usage on the Zeek node(s)."""
    return _zeekctl('df')


def diag():
    """Collect diagnostic output for crashed Zeek nodes."""
    return _zeekctl('diag')


def install(local=''):
    """Install the Zeek configuration.

    :param local: pass the literal string ``'local'`` for
        ``zeekctl install --local``; any other non-empty value is rejected.
    """
    if local:
        if local != 'local':
            return 'Invalid option. zeekctl.help for options'
        return _zeekctl('install --local')
    return _zeekctl('install')


def netstats():
    """Report packet counters from the Zeek workers."""
    return _zeekctl('netstats')


def nodes():
    """List the configured Zeek nodes."""
    return _zeekctl('nodes')


def restart(clean=''):
    """Restart Zeek.

    :param clean: pass the literal string ``'clean'`` for
        ``zeekctl restart --clean``; any other non-empty value is rejected.
    """
    if clean:
        if clean != 'clean':
            return 'Invalid option. zeekctl.help for options'
        return _zeekctl('restart --clean')
    return _zeekctl('restart')


def scripts(c=''):
    """List the Zeek scripts that would be loaded.

    :param c: pass the literal string ``'c'`` for ``zeekctl scripts -c``;
        any other non-empty value is rejected.
    """
    if c:
        if c != 'c':
            return 'Invalid option. zeekctl.help for options'
        return _zeekctl('scripts -c')
    return _zeekctl('scripts')


def start():
    """Start Zeek."""
    return _zeekctl('start')


def status(verbose=True):
    """Show Zeek node status.

    :param verbose: when False, return the command's exit code (0 == healthy)
        instead of its textual output.
    """
    retval = _zeekctl('status')
    if not verbose:
        # docker.run records the container command's exit code here.
        retval = __context__['retcode']
    logging.info('zeekctl_module: zeekctl.status retval: %s', retval)
    return retval


def stop():
    """Stop Zeek."""
    return _zeekctl('stop')


def top():
    """Show resource usage of the Zeek processes."""
    return _zeekctl('top')
||||
@@ -1,30 +0,0 @@
|
||||
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.1.4') %}
|
||||
{% set MASTER = salt['grains.get']('master') %}
|
||||
|
||||
so-auth-api-dir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/auth/api
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
so-auth-api:
|
||||
docker_container.running:
|
||||
- image: {{ MASTER }}:5000/soshybridhunter/so-auth-api:{{ VERSION }}
|
||||
- hostname: so-auth-api
|
||||
- name: so-auth-api
|
||||
- environment:
|
||||
- BASE_PATH: "/so-auth/api"
|
||||
- AUTH_TOKEN_TIMEOUT: 32400
|
||||
- binds:
|
||||
- /opt/so/conf/auth/api:/data
|
||||
- port_bindings:
|
||||
- 0.0.0.0:5656:5656
|
||||
|
||||
so-auth-ui:
|
||||
docker_container.running:
|
||||
- image: {{ MASTER }}:5000/soshybridhunter/so-auth-ui:{{ VERSION }}
|
||||
- hostname: so-auth-ui
|
||||
- name: so-auth-ui
|
||||
- port_bindings:
|
||||
- 0.0.0.0:4242:80
|
||||
@@ -516,7 +516,7 @@
|
||||
}
|
||||
],
|
||||
"thresholds": "5,10",
|
||||
"title": "{{ SERVERNAME }} - Zeek Packet Loss",
|
||||
"title": "{{ SERVERNAME }} -Zeek Packet Loss",
|
||||
"type": "singlestat",
|
||||
"valueFontSize": "80%",
|
||||
"valueMaps": [
|
||||
@@ -772,6 +772,130 @@
|
||||
],
|
||||
"valueName": "current"
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "InfluxDB",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 29
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 37,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "connected",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "healthcheck",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"zeek_restart"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "last"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "host",
|
||||
"operator": "=",
|
||||
"value": "{{ SERVERNAME }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Zeek Restarts via Healthcheck",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {
|
||||
"Interrupt": "#70DBED",
|
||||
|
||||
@@ -2169,6 +2169,130 @@
|
||||
],
|
||||
"valueName": "current"
|
||||
},
|
||||
{
|
||||
"aliasColors": {},
|
||||
"bars": false,
|
||||
"dashLength": 10,
|
||||
"dashes": false,
|
||||
"datasource": "InfluxDB",
|
||||
"fill": 1,
|
||||
"fillGradient": 0,
|
||||
"gridPos": {
|
||||
"h": 5,
|
||||
"w": 8,
|
||||
"x": 0,
|
||||
"y": 29
|
||||
},
|
||||
"hiddenSeries": false,
|
||||
"id": 37,
|
||||
"legend": {
|
||||
"avg": false,
|
||||
"current": false,
|
||||
"max": false,
|
||||
"min": false,
|
||||
"show": true,
|
||||
"total": false,
|
||||
"values": false
|
||||
},
|
||||
"lines": true,
|
||||
"linewidth": 1,
|
||||
"nullPointMode": "connected",
|
||||
"options": {
|
||||
"dataLinks": []
|
||||
},
|
||||
"percentage": false,
|
||||
"pointradius": 2,
|
||||
"points": false,
|
||||
"renderer": "flot",
|
||||
"seriesOverrides": [],
|
||||
"spaceLength": 10,
|
||||
"stack": false,
|
||||
"steppedLine": false,
|
||||
"targets": [
|
||||
{
|
||||
"groupBy": [
|
||||
{
|
||||
"params": [
|
||||
"$__interval"
|
||||
],
|
||||
"type": "time"
|
||||
},
|
||||
{
|
||||
"params": [
|
||||
"null"
|
||||
],
|
||||
"type": "fill"
|
||||
}
|
||||
],
|
||||
"measurement": "healthcheck",
|
||||
"orderByTime": "ASC",
|
||||
"policy": "autogen",
|
||||
"refId": "A",
|
||||
"resultFormat": "time_series",
|
||||
"select": [
|
||||
[
|
||||
{
|
||||
"params": [
|
||||
"zeek_restart"
|
||||
],
|
||||
"type": "field"
|
||||
},
|
||||
{
|
||||
"params": [],
|
||||
"type": "last"
|
||||
}
|
||||
]
|
||||
],
|
||||
"tags": [
|
||||
{
|
||||
"key": "host",
|
||||
"operator": "=",
|
||||
"value": "{{ SERVERNAME }}"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"thresholds": [],
|
||||
"timeFrom": null,
|
||||
"timeRegions": [],
|
||||
"timeShift": null,
|
||||
"title": "Zeek Restarts via Healthcheck",
|
||||
"tooltip": {
|
||||
"shared": true,
|
||||
"sort": 0,
|
||||
"value_type": "individual"
|
||||
},
|
||||
"type": "graph",
|
||||
"xaxis": {
|
||||
"buckets": null,
|
||||
"mode": "time",
|
||||
"name": null,
|
||||
"show": true,
|
||||
"values": []
|
||||
},
|
||||
"yaxes": [
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
},
|
||||
{
|
||||
"format": "short",
|
||||
"label": null,
|
||||
"logBase": 1,
|
||||
"max": null,
|
||||
"min": null,
|
||||
"show": true
|
||||
}
|
||||
],
|
||||
"yaxis": {
|
||||
"align": false,
|
||||
"alignLevel": null
|
||||
}
|
||||
},
|
||||
{
|
||||
"aliasColors": {
|
||||
"Buffered": "#6ED0E0",
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.1.4') %}
|
||||
{% set MASTER = salt['grains.get']('master') %}
|
||||
{% set GRAFANA = salt['pillar.get']('master:grafana', '0') %}
|
||||
{% set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) %}
|
||||
{% set FLEETNODE = salt['pillar.get']('static:fleet_node', False) %}
|
||||
# Add socore Group
|
||||
socoregroup:
|
||||
group.present:
|
||||
@@ -81,10 +83,6 @@ docker:
|
||||
service.running:
|
||||
- enable: True
|
||||
|
||||
salt-minion:
|
||||
service.running:
|
||||
- enable: True
|
||||
|
||||
# Drop the correct nginx config based on role
|
||||
|
||||
nginxconfdir:
|
||||
@@ -102,13 +100,6 @@ nginxconf:
|
||||
- template: jinja
|
||||
- source: salt://common/nginx/nginx.conf.{{ grains.role }}
|
||||
|
||||
copyindex:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/nginx/index.html
|
||||
- user: 939
|
||||
- group: 939
|
||||
- source: salt://common/nginx/index.html
|
||||
|
||||
nginxlogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/nginx/
|
||||
@@ -131,7 +122,6 @@ so-core:
|
||||
- binds:
|
||||
- /opt/so:/opt/so:rw
|
||||
- /opt/so/conf/nginx/nginx.conf:/etc/nginx/nginx.conf:ro
|
||||
- /opt/so/conf/nginx/index.html:/opt/socore/html/index.html:ro
|
||||
- /opt/so/log/nginx/:/var/log/nginx:rw
|
||||
- /opt/so/tmp/nginx/:/var/lib/nginx:rw
|
||||
- /opt/so/tmp/nginx/:/run:rw
|
||||
@@ -142,6 +132,9 @@ so-core:
|
||||
- port_bindings:
|
||||
- 80:80
|
||||
- 443:443
|
||||
{%- if FLEETMASTER or FLEETNODE %}
|
||||
- 8090:8090
|
||||
{%- endif %}
|
||||
- watch:
|
||||
- file: /opt/so/conf/nginx/nginx.conf
|
||||
|
||||
@@ -187,6 +180,8 @@ so-telegraf:
|
||||
- HOST_SYS=/host/sys
|
||||
- HOST_MOUNT_PREFIX=/host
|
||||
- network_mode: host
|
||||
- port_bindings:
|
||||
- 127.0.0.1:8094:8094
|
||||
- binds:
|
||||
- /opt/so/log/telegraf:/var/log/telegraf:rw
|
||||
- /opt/so/conf/telegraf/etc/telegraf.conf:/etc/telegraf/telegraf.conf:ro
|
||||
@@ -308,7 +303,9 @@ grafanaconf:
|
||||
- source: salt://common/grafana/etc
|
||||
|
||||
{% if salt['pillar.get']('mastertab', False) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
|
||||
{% for SN, SNDATA in salt['pillar.get']('mastertab', {}).items() %}
|
||||
{% set NODETYPE = SN.split('_')|last %}
|
||||
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
|
||||
dashboard-master:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/grafana/grafana_dashboards/master/{{ SN }}-Master.json
|
||||
@@ -325,11 +322,13 @@ dashboard-master:
|
||||
ROOTFS: {{ SNDATA.rootfs }}
|
||||
NSMFS: {{ SNDATA.nsmfs }}
|
||||
|
||||
{%- endfor %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{% if salt['pillar.get']('sensorstab', False) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('sensorstab', {}).items() %}
|
||||
{% for SN, SNDATA in salt['pillar.get']('sensorstab', {}).items() %}
|
||||
{% set NODETYPE = SN.split('_')|last %}
|
||||
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
|
||||
dashboard-{{ SN }}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/grafana/grafana_dashboards/forward_nodes/{{ SN }}-Sensor.json
|
||||
@@ -350,7 +349,9 @@ dashboard-{{ SN }}:
|
||||
{% endif %}
|
||||
|
||||
{% if salt['pillar.get']('nodestab', False) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
|
||||
{% for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
|
||||
{% set NODETYPE = SN.split('_')|last %}
|
||||
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
|
||||
dashboardsearch-{{ SN }}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/grafana/grafana_dashboards/search_nodes/{{ SN }}-Node.json
|
||||
@@ -371,7 +372,9 @@ dashboardsearch-{{ SN }}:
|
||||
{% endif %}
|
||||
|
||||
{% if salt['pillar.get']('evaltab', False) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('evaltab', {}).items() %}
|
||||
{% for SN, SNDATA in salt['pillar.get']('evaltab', {}).items() %}
|
||||
{% set NODETYPE = SN.split('_')|last %}
|
||||
{% set SN = SN | regex_replace('_' ~ NODETYPE, '') %}
|
||||
dashboard-{{ SN }}:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/grafana/grafana_dashboards/eval/{{ SN }}-Node.json
|
||||
|
||||
@@ -1,163 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<title>Security Onion - Hybrid Hunter</title>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1">
|
||||
<link rel="icon" type="image/png" href="favicon-32x32.png" sizes="32x32" />
|
||||
<link rel="icon" type="image/png" href="favicon-16x16.png" sizes="16x16" />
|
||||
<style>
|
||||
* {
|
||||
box-sizing: border-box;
|
||||
font-family: Arial, Helvetica, sans-serif;
|
||||
padding-left: 30px;
|
||||
padding-right: 30px;
|
||||
}
|
||||
|
||||
body {
|
||||
font-family: Arial, Helvetica, sans-serif;
|
||||
background-color: #2a2a2a;
|
||||
|
||||
}
|
||||
a {
|
||||
color: #f2f2f2;
|
||||
text-align: left;
|
||||
padding: 0px;
|
||||
}
|
||||
|
||||
.center-content {
|
||||
margin: 0 auto;
|
||||
}
|
||||
|
||||
.center-image {
|
||||
display: block;
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
width: 50%;
|
||||
}
|
||||
|
||||
/* Style the top navigation bar */
|
||||
.topnav {
|
||||
overflow: hidden;
|
||||
background-color: #333;
|
||||
width: 1080px;
|
||||
display: flex;
|
||||
align-content: center;
|
||||
}
|
||||
|
||||
/* Style the topnav links */
|
||||
.topnav a {
|
||||
margin: auto;
|
||||
color: #f2f2f2;
|
||||
text-align: center;
|
||||
padding: 14px 16px;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
/* Change color on hover */
|
||||
.topnav a:hover {
|
||||
background-color: #ddd;
|
||||
color: black;
|
||||
}
|
||||
|
||||
/* Style the content */
|
||||
.content {
|
||||
background-color: #2a2a2a;
|
||||
padding: 10px;
|
||||
padding-top: 20px;
|
||||
padding-left: 60px;
|
||||
color: #E3DBCC;
|
||||
width: 1080px;
|
||||
}
|
||||
|
||||
/* Style the footer */
|
||||
.footer {
|
||||
background-color: #2a2a2a;
|
||||
padding: 60px;
|
||||
color: #E3DBCC;
|
||||
width: 1080px;
|
||||
}
|
||||
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="center-content">
|
||||
<div class="topnav center-content">
|
||||
<a href="/so-auth/loginpage/create-user" target="_blank">Create New User</a>
|
||||
<a href="/kibana/" target="_blank">Kibana</a>
|
||||
<a href="/grafana/" target="_blank">Grafana</a>
|
||||
<a href="/sensoroni/" target="_blank">Sensoroni</a>
|
||||
<a href="/playbook/" target="_blank">Playbook</a>
|
||||
<a href="/fleet/" target="_blank">Fleet</a>
|
||||
<a href="/thehive/" target="_blank">TheHive</a>
|
||||
<a href="/packages/" target="_blank">Osquery Packages</a>
|
||||
<a href="https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/FAQ" target="_blank">FAQ</a>
|
||||
<a href="https://www.securityonionsolutions.com" target="_blank">Security Onion Solutions</a>
|
||||
<a href="https://blog.securityonion.net" target="_blank">Blog</a>
|
||||
</div>
|
||||
|
||||
<div class="content center-content">
|
||||
<div style="text-align: center;">
|
||||
<a href="https://securityonion.net">
|
||||
<img style="border: none;" src="alpha_logo.jpg" alt="Security Onion" class="center-image" target="_blank">
|
||||
</a>
|
||||
<br/>
|
||||
</div>
|
||||
<p>
|
||||
<div style="text-align: center;">
|
||||
<h1>Hybrid Hunter Alpha 1.1.4 - Feature Parity Release</h1>
|
||||
</div>
|
||||
<br/>
|
||||
<h2>Changes:</h2>
|
||||
<ul>
|
||||
<li>Added new in-house auth method [Security Onion Auth](https://github.com/Security-Onion-Solutions/securityonion-auth).</li>
|
||||
<li>Web user creation is done via the browser now instead of so-user-add.</li>
|
||||
<li>New Logstash pipeline setup. Now uses multiple pipelines.</li>
|
||||
<li>New Master + Search node type and well as a Heavy Node type in the install.</li>
|
||||
<li>Change all nodes to point to the docker registry on the Master. This cuts down on the calls to dockerhub.</li>
|
||||
<li>Zeek 3.0.1</li>
|
||||
<li>Elastic 6.8.6</li>
|
||||
<li>New SO Start | Stop | Restart scripts for all components (eg. `so-playbook-restart`).</li>
|
||||
<li>BPF support for Suricata (NIDS), Steno (PCAP) & Zeek ([Docs](https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/BPF)).</li>
|
||||
<li>Updated Domain Stats & Frequency Server containers to Python3 & created new Salt states for them.</li>
|
||||
<li>Added so-status script which gives an easy to read look at container status.</li>
|
||||
<li>Manage threshold.conf for Suricata using the thresholding pillar.</li>
|
||||
<li>The ISO now includes all the docker containers for faster install speeds.</li>
|
||||
<li>You now set the password for the onion account during the iso install. This account is temporary and will be removed after so-setup.</li>
|
||||
<li>Updated Helix parsers for better compatibility.</li>
|
||||
<li>Updated telegraf docker to include curl and jq.</li>
|
||||
<li>CVE-2020-0601 Zeek Detection Script.</li>
|
||||
<li>ISO Install now prompts you to create a password for the onion user during imaging. This account gets disabled during setup.</li>
|
||||
<li>Check out the <a href="https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/Hybrid-Hunter-Quick-Start-Guide" target="_blank">Hybrid Hunter Quick Start Guide</a>.</li>
|
||||
</ul>
|
||||
</p>
|
||||
</div>
|
||||
|
||||
<div class="footer center-content">
|
||||
<b>Disclaimer of Warranty</b>
|
||||
<br/>
|
||||
<small>
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
|
||||
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM
|
||||
.AS IS. WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
|
||||
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE,
|
||||
YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
</small>
|
||||
<br/>
|
||||
<br/>
|
||||
<b>Limitation of Liability</b>
|
||||
<br/>
|
||||
<small>
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER,
|
||||
OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM
|
||||
(INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES
|
||||
OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
</small>
|
||||
<br/>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
@@ -1,9 +1,9 @@
|
||||
{%- set masterip = salt['pillar.get']('master:mainip', '') %}
|
||||
{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log;
|
||||
pid /run/nginx.pid;
|
||||
@@ -27,6 +27,7 @@ http {
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 1024M;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
@@ -63,6 +64,29 @@ http {
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
{% if FLEET_MASTER %}
|
||||
server {
|
||||
listen 8090 ssl http2 default_server;
|
||||
server_name _;
|
||||
root /opt/socore/html;
|
||||
index blank.html;
|
||||
|
||||
ssl_certificate "/etc/pki/nginx/server.crt";
|
||||
ssl_certificate_key "/etc/pki/nginx/server.key";
|
||||
ssl_session_cache shared:SSL:1m;
|
||||
ssl_session_timeout 10m;
|
||||
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
|
||||
grpc_pass grpcs://{{ masterip }}:8080;
|
||||
grpc_set_header Host $host;
|
||||
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_buffering off;
|
||||
}
|
||||
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
# Settings for a TLS enabled server.
|
||||
|
||||
@@ -83,10 +107,54 @@ http {
|
||||
# Load configuration files for the default server block.
|
||||
#include /etc/nginx/default.d/*.conf;
|
||||
|
||||
#location / {
|
||||
# try_files $uri $uri.html /index.html;
|
||||
# }
|
||||
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
|
||||
proxy_pass http://{{ masterip }}:9822;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
}
|
||||
|
||||
location / {
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
}
|
||||
|
||||
location /auth/ {
|
||||
rewrite /auth/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:4433/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /packages/ {
|
||||
try_files $uri =206;
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
}
|
||||
|
||||
location /grafana/ {
|
||||
rewrite /grafana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:3000/;
|
||||
@@ -100,7 +168,7 @@ http {
|
||||
}
|
||||
|
||||
location /kibana/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
auth_request /auth/sessions/whoami;
|
||||
rewrite /kibana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:5601/;
|
||||
proxy_read_timeout 90;
|
||||
@@ -138,7 +206,7 @@ http {
|
||||
|
||||
|
||||
location /navigator/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_pass http://{{ masterip }}:4200/navigator/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
@@ -149,21 +217,8 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /api/ {
|
||||
proxy_pass https://{{ masterip }}:8080/api/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /fleet/ {
|
||||
proxy_pass https://{{ masterip }}:8080/fleet/;
|
||||
proxy_pass https://{{ masterip }}:8080;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -208,22 +263,8 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /sensoroni/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
|
||||
}
|
||||
|
||||
location /kibana/app/sensoroni/ {
|
||||
rewrite ^/kibana/app/sensoroni/(.*) /sensoroni/$1 permanent;
|
||||
location /kibana/app/soc/ {
|
||||
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
|
||||
}
|
||||
|
||||
location /kibana/app/fleet/ {
|
||||
@@ -244,23 +285,11 @@ http {
|
||||
proxy_set_header Proxy "";
|
||||
}
|
||||
|
||||
location /so-auth/loginpage/ {
|
||||
proxy_pass http://{{ masterip }}:4242/;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
|
||||
location /so-auth/api/ {
|
||||
proxy_pass http://{{ masterip }}:5656/;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
}
|
||||
|
||||
error_page 401 = @error401;
|
||||
|
||||
location @error401 {
|
||||
add_header Set-Cookie "NSREDIRECT=http://{{ masterip }}$request_uri;Domain={{ masterip }};Path=/;Max-Age=60000";
|
||||
return 302 http://{{ masterip }}/so-auth/loginpage/;
|
||||
return 302 /auth/self-service/browser/flows/login;
|
||||
}
|
||||
|
||||
error_page 404 /404.html;
|
||||
|
||||
98
salt/common/nginx/nginx.conf.so-fleet
Normal file
98
salt/common/nginx/nginx.conf.so-fleet
Normal file
@@ -0,0 +1,98 @@
|
||||
{%- set MAINIP = salt['pillar.get']('node:mainip', '') %}
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log;
|
||||
pid /run/nginx.pid;
|
||||
|
||||
# Load dynamic modules. See /usr/share/nginx/README.dynamic.
|
||||
include /usr/share/nginx/modules/*.conf;
|
||||
|
||||
events {
|
||||
worker_connections 1024;
|
||||
}
|
||||
|
||||
http {
|
||||
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
|
||||
'$status $body_bytes_sent "$http_referer" '
|
||||
'"$http_user_agent" "$http_x_forwarded_for"';
|
||||
|
||||
access_log /var/log/nginx/access.log main;
|
||||
|
||||
sendfile on;
|
||||
tcp_nopush on;
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
|
||||
include /etc/nginx/conf.d/*.conf;
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 8090 ssl http2 default_server;
|
||||
server_name _;
|
||||
root /opt/socore/html;
|
||||
index blank.html;
|
||||
|
||||
ssl_certificate "/etc/pki/nginx/server.crt";
|
||||
ssl_certificate_key "/etc/pki/nginx/server.key";
|
||||
ssl_session_cache shared:SSL:1m;
|
||||
ssl_session_timeout 10m;
|
||||
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
|
||||
grpc_pass grpcs://{{ MAINIP }}:8080;
|
||||
grpc_set_header Host $host;
|
||||
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_buffering off;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
server {
|
||||
listen 443 ssl http2 default_server;
|
||||
server_name _;
|
||||
root /opt/socore/html/packages;
|
||||
index index.html;
|
||||
|
||||
ssl_certificate "/etc/pki/nginx/server.crt";
|
||||
ssl_certificate_key "/etc/pki/nginx/server.key";
|
||||
ssl_session_cache shared:SSL:1m;
|
||||
ssl_session_timeout 10m;
|
||||
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
location /fleet/ {
|
||||
proxy_pass https://{{ MAINIP }}:8080;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
error_page 404 /404.html;
|
||||
location = /40x.html {
|
||||
}
|
||||
|
||||
error_page 500 502 503 504 /50x.html;
|
||||
location = /50x.html {
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
@@ -1,9 +1,9 @@
|
||||
{%- set masterip = salt['pillar.get']('master:mainip', '') %}
|
||||
{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log;
|
||||
pid /run/nginx.pid;
|
||||
@@ -27,6 +27,7 @@ http {
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 1024M;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
@@ -58,11 +59,34 @@ http {
|
||||
# }
|
||||
#}
|
||||
server {
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
{% if FLEET_MASTER %}
|
||||
server {
|
||||
listen 8090 ssl http2 default_server;
|
||||
server_name _;
|
||||
root /opt/socore/html;
|
||||
index blank.html;
|
||||
|
||||
ssl_certificate "/etc/pki/nginx/server.crt";
|
||||
ssl_certificate_key "/etc/pki/nginx/server.key";
|
||||
ssl_session_cache shared:SSL:1m;
|
||||
ssl_session_timeout 10m;
|
||||
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
|
||||
grpc_pass grpcs://{{ masterip }}:8080;
|
||||
grpc_set_header Host $host;
|
||||
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_buffering off;
|
||||
}
|
||||
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
# Settings for a TLS enabled server.
|
||||
|
||||
@@ -83,13 +107,57 @@ http {
|
||||
# Load configuration files for the default server block.
|
||||
#include /etc/nginx/default.d/*.conf;
|
||||
|
||||
#location / {
|
||||
# try_files $uri $uri.html /index.html;
|
||||
# }
|
||||
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
|
||||
proxy_pass http://{{ masterip }}:9822;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
}
|
||||
|
||||
location / {
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
}
|
||||
|
||||
location /auth/ {
|
||||
rewrite /auth/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:4433/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /packages/ {
|
||||
try_files $uri =206;
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
}
|
||||
|
||||
location /grafana/ {
|
||||
rewrite /grafana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:3000/;
|
||||
rewrite /grafana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:3000/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -100,9 +168,9 @@ http {
|
||||
}
|
||||
|
||||
location /kibana/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
rewrite /kibana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:5601/;
|
||||
auth_request /auth/sessions/whoami;
|
||||
rewrite /kibana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:5601/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -126,7 +194,7 @@ http {
|
||||
}
|
||||
|
||||
location /playbook/ {
|
||||
proxy_pass http://{{ masterip }}:3200/playbook/;
|
||||
proxy_pass http://{{ masterip }}:3200/playbook/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -136,9 +204,10 @@ http {
|
||||
|
||||
}
|
||||
|
||||
|
||||
location /navigator/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
proxy_pass http://{{ masterip }}:4200/navigator/;
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_pass http://{{ masterip }}:4200/navigator/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -148,23 +217,8 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /api/ {
|
||||
proxy_pass https://{{ masterip }}:8080/api/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /fleet/ {
|
||||
rewrite /fleet/(.*) /$1 break;
|
||||
auth_request /so-auth/api/auth/;
|
||||
proxy_pass https://{{ masterip }}:8080/;
|
||||
proxy_pass https://{{ masterip }}:8080;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -175,10 +229,10 @@ http {
|
||||
}
|
||||
|
||||
location /thehive/ {
|
||||
proxy_pass http://{{ masterip }}:9000/thehive/;
|
||||
proxy_pass http://{{ masterip }}:9000/thehive/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
@@ -187,19 +241,19 @@ http {
|
||||
}
|
||||
|
||||
location /cortex/ {
|
||||
proxy_pass http://{{ masterip }}:9001/cortex/;
|
||||
proxy_pass http://{{ masterip }}:9001/cortex/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
|
||||
location /soctopus/ {
|
||||
proxy_pass http://{{ masterip }}:7000/;
|
||||
proxy_pass http://{{ masterip }}:7000/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -209,22 +263,8 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /sensoroni/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
|
||||
}
|
||||
|
||||
location /kibana/app/sensoroni/ {
|
||||
rewrite ^/kibana/app/sensoroni/(.*) /sensoroni/$1 permanent;
|
||||
location /kibana/app/soc/ {
|
||||
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
|
||||
}
|
||||
|
||||
location /kibana/app/fleet/ {
|
||||
@@ -235,36 +275,21 @@ http {
|
||||
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
|
||||
}
|
||||
|
||||
|
||||
location /sensoroniagents/ {
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
|
||||
location /so-auth/loginpage/ {
|
||||
proxy_pass http://{{ masterip }}:4242/;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
|
||||
location /so-auth/api/ {
|
||||
proxy_pass http://{{ masterip }}:5656/;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
}
|
||||
|
||||
error_page 401 = @error401;
|
||||
|
||||
location @error401 {
|
||||
add_header Set-Cookie "NSREDIRECT=http://{{ masterip }}$request_uri;Domain={{ masterip }};Path=/;Max-Age=60000";
|
||||
return 302 http://{{ masterip }}/so-auth/loginpage/;
|
||||
return 302 /auth/self-service/browser/flows/login;
|
||||
}
|
||||
|
||||
error_page 404 /404.html;
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
{%- set masterip = salt['pillar.get']('master:mainip', '') %}
|
||||
{%- set FLEET_MASTER = salt['pillar.get']('static:fleet_master') %}
|
||||
# For more information on configuration, see:
|
||||
# * Official English Documentation: http://nginx.org/en/docs/
|
||||
# * Official Russian Documentation: http://nginx.org/ru/docs/
|
||||
|
||||
user nginx;
|
||||
worker_processes auto;
|
||||
error_log /var/log/nginx/error.log;
|
||||
pid /run/nginx.pid;
|
||||
@@ -27,6 +27,7 @@ http {
|
||||
tcp_nodelay on;
|
||||
keepalive_timeout 65;
|
||||
types_hash_max_size 2048;
|
||||
client_max_body_size 1024M;
|
||||
|
||||
include /etc/nginx/mime.types;
|
||||
default_type application/octet-stream;
|
||||
@@ -58,11 +59,34 @@ http {
|
||||
# }
|
||||
#}
|
||||
server {
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
listen 80 default_server;
|
||||
server_name _;
|
||||
return 301 https://$host$request_uri;
|
||||
}
|
||||
|
||||
{% if FLEET_MASTER %}
|
||||
server {
|
||||
listen 8090 ssl http2 default_server;
|
||||
server_name _;
|
||||
root /opt/socore/html;
|
||||
index blank.html;
|
||||
|
||||
ssl_certificate "/etc/pki/nginx/server.crt";
|
||||
ssl_certificate_key "/etc/pki/nginx/server.key";
|
||||
ssl_session_cache shared:SSL:1m;
|
||||
ssl_session_timeout 10m;
|
||||
ssl_ciphers HIGH:!aNULL:!MD5;
|
||||
ssl_prefer_server_ciphers on;
|
||||
|
||||
location ~ ^/kolide.agent.Api/(RequestEnrollment|RequestConfig|RequestQueries|PublishLogs|PublishResults|CheckHealth)$ {
|
||||
grpc_pass grpcs://{{ masterip }}:8080;
|
||||
grpc_set_header Host $host;
|
||||
grpc_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_buffering off;
|
||||
}
|
||||
|
||||
}
|
||||
{% endif %}
|
||||
|
||||
# Settings for a TLS enabled server.
|
||||
|
||||
@@ -83,13 +107,57 @@ http {
|
||||
# Load configuration files for the default server block.
|
||||
#include /etc/nginx/default.d/*.conf;
|
||||
|
||||
#location / {
|
||||
# try_files $uri $uri.html /index.html;
|
||||
# }
|
||||
location ~* (^/login/|^/js/.*|^/css/.*|^/images/.*) {
|
||||
proxy_pass http://{{ masterip }}:9822;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
}
|
||||
|
||||
location / {
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
}
|
||||
|
||||
location /auth/ {
|
||||
rewrite /auth/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:4433/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /packages/ {
|
||||
try_files $uri =206;
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
}
|
||||
|
||||
location /grafana/ {
|
||||
rewrite /grafana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:3000/;
|
||||
rewrite /grafana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:3000/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -100,9 +168,9 @@ http {
|
||||
}
|
||||
|
||||
location /kibana/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
rewrite /kibana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:5601/;
|
||||
auth_request /auth/sessions/whoami;
|
||||
rewrite /kibana/(.*) /$1 break;
|
||||
proxy_pass http://{{ masterip }}:5601/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -112,8 +180,21 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /playbook/ {
|
||||
proxy_pass http://{{ masterip }}:3200/playbook/;
|
||||
location /nodered/ {
|
||||
proxy_pass http://{{ masterip }}:1880/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /playbook/ {
|
||||
proxy_pass http://{{ masterip }}:3200/playbook/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -123,9 +204,10 @@ http {
|
||||
|
||||
}
|
||||
|
||||
|
||||
location /navigator/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
proxy_pass http://{{ masterip }}:4200/navigator/;
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_pass http://{{ masterip }}:4200/navigator/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -135,23 +217,8 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /api/ {
|
||||
proxy_pass https://{{ masterip }}:8080/api/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /fleet/ {
|
||||
rewrite /fleet/(.*) /$1 break;
|
||||
auth_request /so-auth/api/auth/;
|
||||
proxy_pass https://{{ masterip }}:8080/;
|
||||
proxy_pass https://{{ masterip }}:8080;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -162,10 +229,10 @@ http {
|
||||
}
|
||||
|
||||
location /thehive/ {
|
||||
proxy_pass http://{{ masterip }}:9000/thehive/;
|
||||
proxy_pass http://{{ masterip }}:9000/thehive/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
@@ -174,31 +241,19 @@ http {
|
||||
}
|
||||
|
||||
location /cortex/ {
|
||||
proxy_pass http://{{ masterip }}:9001/cortex/;
|
||||
proxy_pass http://{{ masterip }}:9001/cortex/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
location /cyberchef/ {
|
||||
proxy_pass http://{{ masterip }}:9080/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_http_version 1.1; # this is essential for chunked responses to work
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
|
||||
location /soctopus/ {
|
||||
proxy_pass http://{{ masterip }}:7000/;
|
||||
proxy_pass http://{{ masterip }}:7000/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
@@ -208,22 +263,8 @@ http {
|
||||
|
||||
}
|
||||
|
||||
location /sensoroni/ {
|
||||
auth_request /so-auth/api/auth/;
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection "Upgrade";
|
||||
|
||||
}
|
||||
|
||||
location /kibana/app/sensoroni/ {
|
||||
rewrite ^/kibana/app/sensoroni/(.*) /sensoroni/$1 permanent;
|
||||
location /kibana/app/soc/ {
|
||||
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
|
||||
}
|
||||
|
||||
location /kibana/app/fleet/ {
|
||||
@@ -234,36 +275,21 @@ http {
|
||||
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
|
||||
}
|
||||
|
||||
|
||||
location /sensoroniagents/ {
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_pass http://{{ masterip }}:9822/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
|
||||
}
|
||||
|
||||
|
||||
location /so-auth/loginpage/ {
|
||||
proxy_pass http://{{ masterip }}:4242/;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
}
|
||||
|
||||
location /so-auth/api/ {
|
||||
proxy_pass http://{{ masterip }}:5656/;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Forwarded-Host $host;
|
||||
}
|
||||
|
||||
error_page 401 = @error401;
|
||||
|
||||
location @error401 {
|
||||
add_header Set-Cookie "NSREDIRECT=http://{{ masterip }}$request_uri;Domain={{ masterip }};Path=/;Max-Age=60000";
|
||||
return 302 http://{{ masterip }}/so-auth/loginpage/;
|
||||
return 302 /auth/self-service/browser/flows/login;
|
||||
}
|
||||
|
||||
error_page 404 /404.html;
|
||||
|
||||
@@ -498,10 +498,10 @@
|
||||
[[inputs.disk]]
|
||||
## By default stats will be gathered for all mount points.
|
||||
## Set mount_points will restrict the stats to only the specified mount points.
|
||||
# mount_points = ["/"]
|
||||
mount_points = ["/","/nsm"]
|
||||
|
||||
## Ignore mount points by filesystem type.
|
||||
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||
#ignore_fs = ["tmpfs", "devtmpfs", "devfs", "overlay", "aufs", "squashfs"]
|
||||
|
||||
|
||||
# Read metrics about disk IO by device
|
||||
@@ -2053,6 +2053,9 @@
|
||||
# ## more about them here:
|
||||
# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
|
||||
# # data_format = "influx"
|
||||
[[inputs.socket_listener]]
|
||||
service_address = "udp://:8094"
|
||||
data_format = "influx"
|
||||
|
||||
|
||||
# # Statsd UDP/TCP Server
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
#!/bin/bash
|
||||
|
||||
INFLUXSIZE=$(du -s -B1 /host/nsm/influxdb | awk {'print $1'}
|
||||
INFLUXSIZE=$(du -s -B1 /host/nsm/influxdb | awk {'print $1'})
|
||||
|
||||
echo "influxsize bytes=$INFLUXSIZE"
|
||||
|
||||
@@ -56,7 +56,7 @@ if [ "$SKIP" -eq 0 ]; then
|
||||
echo ""
|
||||
echo "[a] - Analyst - ports 80/tcp and 443/tcp"
|
||||
echo "[b] - Logstash Beat - port 5044/tcp"
|
||||
echo "[o] - Osquery endpoint - port 8080/tcp"
|
||||
echo "[o] - Osquery endpoint - port 8090/tcp"
|
||||
echo "[w] - Wazuh endpoint - port 1514"
|
||||
echo ""
|
||||
echo "Please enter your selection (a - analyst, b - beats, o - osquery, w - wazuh):"
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-restart auth $1
|
||||
|
||||
@@ -1,20 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-start auth $1
|
||||
@@ -1,20 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-stop auth $1
|
||||
@@ -2,8 +2,6 @@
|
||||
MASTER=MASTER
|
||||
VERSION="HH1.1.4"
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-auth-api:$VERSION" \
|
||||
"so-auth-ui:$VERSION" \
|
||||
"so-core:$VERSION" \
|
||||
"so-thehive-cortex:$VERSION" \
|
||||
"so-curator:$VERSION" \
|
||||
|
||||
@@ -17,4 +17,4 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
docker exec so-soctopus python3 playbook_play-sync.py
|
||||
docker exec so-soctopus python3 playbook_play-sync.py >> /opt/so/log/soctopus/so-playbook-sync.log 2>&1
|
||||
|
||||
@@ -32,6 +32,5 @@ fi
|
||||
case $1 in
|
||||
"cortex") docker stop so-thehive-cortex so-thehive && docker rm so-thehive-cortex so-thehive && salt-call state.apply hive queue=True;;
|
||||
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
|
||||
"auth") docker stop so-auth-api; docker stop so-auth-ui; salt-call state.apply auth queue=True;;
|
||||
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
|
||||
esac
|
||||
|
||||
@@ -32,16 +32,5 @@ fi
|
||||
case $1 in
|
||||
"all") salt-call state.highstate queue=True;;
|
||||
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
|
||||
"auth")
|
||||
if docker ps | grep -q so-auth-api; then
|
||||
if docker ps | grep -q so-auth-ui; then
|
||||
printf "\n$1 is already running!\n\n"
|
||||
else
|
||||
docker rm so-auth-api >/dev/null 2>&1; docker rm so-auth-ui >/dev/null 2>&1; salt-call state.apply $1 queue=True
|
||||
fi
|
||||
else
|
||||
docker rm so-auth-api >/dev/null 2>&1; docker rm so-auth-ui >/dev/null 2>&1; salt-call state.apply $1 queue=True
|
||||
fi
|
||||
;;
|
||||
*) if docker ps | grep -q so-$1; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
|
||||
esac
|
||||
|
||||
@@ -26,6 +26,8 @@
|
||||
{%- set pillar_val = 'sensor' -%}
|
||||
{%- elif (salt['grains.get']('role') == 'so-eval') -%}
|
||||
{%- set pillar_val = 'eval' -%}
|
||||
{%- elif (salt['grains.get']('role') == 'so-fleet') -%}
|
||||
{%- set pillar_val = 'fleet' -%}
|
||||
{%- elif (salt['grains.get']('role') == 'so-helix') -%}
|
||||
{%- set pillar_val = 'helix' -%}
|
||||
{%- elif (salt['grains.get']('role') == 'so-node') -%}
|
||||
@@ -105,7 +107,7 @@ populate_container_lists() {
|
||||
systemctl is-active --quiet docker
|
||||
|
||||
if [[ $? = 0 ]]; then
|
||||
mapfile -t docker_raw_list < <(curl -s --unix-socket /var/run/docker.sock http:/containers/json?all=1 \
|
||||
mapfile -t docker_raw_list < <(curl -s --unix-socket /var/run/docker.sock http:/v2/containers/json?all=1 \
|
||||
| jq -c '.[] | { Name: .Names[0], State: .State }' \
|
||||
| tr -d '/{"}')
|
||||
else
|
||||
|
||||
@@ -24,7 +24,6 @@ printf "Stopping $1...\n"
|
||||
echo $banner
|
||||
|
||||
case $1 in
|
||||
"auth") docker stop so-auth-api; docker rm so-auth-api; docker stop so-auth-ui; docker rm so-auth-ui ;;
|
||||
*) docker stop so-$1 ; docker rm so-$1 ;;
|
||||
esac
|
||||
|
||||
|
||||
237
salt/common/tools/sbin/so-user
Executable file
237
salt/common/tools/sbin/so-user
Executable file
@@ -0,0 +1,237 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2020 Security Onion Solutions. All rights reserved.
|
||||
#
|
||||
# This program is distributed under the terms of version 2 of the
|
||||
# GNU General Public License. See LICENSE for further details.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
got_root() {
|
||||
|
||||
# Make sure you are root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "This script must be run using sudo!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
# Make sure the user is root
|
||||
got_root
|
||||
|
||||
if [[ $# < 1 || $# > 2 ]]; then
|
||||
echo "Usage: $0 <list|add|update|delete|validate|valemail|valpass> [email]"
|
||||
echo ""
|
||||
echo " list: Lists all user email addresses currently defined in the identity system"
|
||||
echo " add: Adds a new user to the identity system; requires 'email' parameter"
|
||||
echo " update: Updates a user's password; requires 'email' parameter"
|
||||
echo " delete: Deletes an existing user; requires 'email' parameter"
|
||||
echo " validate: Validates that the given email address and password are acceptable for defining a new user; requires 'email' parameter"
|
||||
echo " valemail: Validates that the given email address is acceptable for defining a new user; requires 'email' parameter"
|
||||
echo " valpass: Validates that a password is acceptable for defining a new user"
|
||||
echo ""
|
||||
echo " Note that the password can be piped into stdin to avoid prompting for it."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
operation=$1
|
||||
email=$2
|
||||
|
||||
kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434}
|
||||
databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite}
|
||||
argon2Iterations=${ARGON2_ITERATIONS:-3}
|
||||
argon2Memory=${ARGON2_MEMORY:-14}
|
||||
argon2Parallelism=${ARGON2_PARALLELISM:-2}
|
||||
argon2HashSize=${ARGON2_HASH_SIZE:-32}
|
||||
|
||||
function fail() {
|
||||
msg=$1
|
||||
echo "$1"
|
||||
exit 1
|
||||
}
|
||||
|
||||
function require() {
|
||||
cmd=$1
|
||||
which "$1" 2>&1 > /dev/null
|
||||
[[ $? != 0 ]] && fail "This script requires the following command be installed: ${cmd}"
|
||||
}
|
||||
|
||||
# Verify this environment is capable of running this script
|
||||
function verifyEnvironment() {
|
||||
require "argon2"
|
||||
require "jq"
|
||||
require "curl"
|
||||
require "openssl"
|
||||
require "sqlite3"
|
||||
[[ ! -f $databasePath ]] && fail "Unable to find database file; specify path via KRATOS_DB_PATH environment variable"
|
||||
response=$(curl -Ss ${kratosUrl}/)
|
||||
[[ "$response" != "404 page not found" ]] && fail "Unable to communicate with Kratos; specify URL via KRATOS_URL environment variable"
|
||||
}
|
||||
|
||||
function findIdByEmail() {
|
||||
email=$1
|
||||
|
||||
response=$(curl -Ss ${kratosUrl}/identities)
|
||||
identityId=$(echo "${response}" | jq ".[] | select(.addresses[0].value == \"$email\") | .id")
|
||||
echo $identityId
|
||||
}
|
||||
|
||||
function validatePassword() {
|
||||
password=$1
|
||||
|
||||
len=$(expr length "$password")
|
||||
if [[ $len -lt 6 ]]; then
|
||||
echo "Password does not meet the minimum requirements"
|
||||
exit 2
|
||||
fi
|
||||
}
|
||||
|
||||
function validateEmail() {
|
||||
email=$1
|
||||
# (?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])
|
||||
if [[ ! "$email" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ ]]; then
|
||||
echo "Email address is invalid"
|
||||
exit 3
|
||||
fi
|
||||
}
|
||||
|
||||
function updatePassword() {
|
||||
identityId=$1
|
||||
|
||||
# Read password from stdin (show prompt only if no stdin was piped in)
|
||||
test -t 0
|
||||
if [[ $? == 0 ]]; then
|
||||
echo "Enter new password:"
|
||||
fi
|
||||
read -s password
|
||||
|
||||
validatePassword "$password"
|
||||
|
||||
if [[ -n $identityId ]]; then
|
||||
# Generate password hash
|
||||
salt=$(openssl rand -hex 8)
|
||||
passwordHash=$(echo "${password}" | argon2 ${salt} -id -t $argon2Iterations -m $argon2Memory -p $argon2Parallelism -l $argon2HashSize -e)
|
||||
|
||||
# Update DB with new hash
|
||||
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"${passwordHash}\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to update password"
|
||||
fi
|
||||
}
|
||||
|
||||
function listUsers() {
|
||||
response=$(curl -Ss ${kratosUrl}/identities)
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
echo "${response}" | jq -r ".[] | .addresses[0].value" | sort
|
||||
}
|
||||
|
||||
function createUser() {
|
||||
email=$1
|
||||
|
||||
now=$(date -u +%FT%TZ)
|
||||
addUserJson=$(cat <<EOF
|
||||
{
|
||||
"addresses": [
|
||||
{
|
||||
"expires_at": "2099-01-31T12:00:00Z",
|
||||
"value": "${email}",
|
||||
"verified": true,
|
||||
"verified_at": "${now}",
|
||||
"via": "so-add-user"
|
||||
}
|
||||
],
|
||||
"traits": {"email":"${email}"},
|
||||
"traits_schema_id": "default"
|
||||
}
|
||||
EOF
|
||||
)
|
||||
|
||||
response=$(curl -Ss ${kratosUrl}/identities -d "$addUserJson")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
identityId=$(echo "${response}" | jq ".id")
|
||||
if [[ ${identityId} == "null" ]]; then
|
||||
code=$(echo "${response}" | jq ".error.code")
|
||||
[[ "${code}" == "409" ]] && fail "User already exists"
|
||||
|
||||
reason=$(echo "${response}" | jq ".error.message")
|
||||
[[ $? == 0 ]] && fail "Unable to add user: ${reason}"
|
||||
fi
|
||||
|
||||
updatePassword $identityId
|
||||
}
|
||||
|
||||
function updateUser() {
|
||||
email=$1
|
||||
|
||||
identityId=$(findIdByEmail "$email")
|
||||
[[ ${identityId} == "" ]] && fail "User not found"
|
||||
|
||||
updatePassword $identityId
|
||||
}
|
||||
|
||||
function deleteUser() {
|
||||
email=$1
|
||||
|
||||
identityId=$(findIdByEmail "$email")
|
||||
[[ ${identityId} == "" ]] && fail "User not found"
|
||||
|
||||
response=$(curl -Ss -XDELETE "${kratosUrl}/identities/$identityId")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
}
|
||||
|
||||
case "${operation}" in
|
||||
"add")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
validateEmail "$email"
|
||||
createUser "$email"
|
||||
echo "Successfully added new user"
|
||||
;;
|
||||
|
||||
"list")
|
||||
verifyEnvironment
|
||||
listUsers
|
||||
;;
|
||||
|
||||
"update")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
updateUser "$email"
|
||||
echo "Successfully updated user"
|
||||
;;
|
||||
|
||||
"delete")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
deleteUser "$email"
|
||||
echo "Successfully deleted user"
|
||||
;;
|
||||
|
||||
"validate")
|
||||
validateEmail "$email"
|
||||
updatePassword
|
||||
echo "Email and password are acceptable"
|
||||
;;
|
||||
|
||||
"valemail")
|
||||
validateEmail "$email"
|
||||
echo "Email is acceptable"
|
||||
;;
|
||||
|
||||
"valpass")
|
||||
updatePassword
|
||||
echo "Password is acceptable"
|
||||
;;
|
||||
|
||||
*)
|
||||
fail "Unsupported operation: $operation"
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
||||
@@ -1,17 +1,2 @@
|
||||
#!/bin/bash
|
||||
USERNAME=$1
|
||||
|
||||
# Make sure a username is provided
|
||||
[ $# -eq 0 ] && { echo "Usage: $0 username"; exit 1; }
|
||||
|
||||
# If the file is there already lets create it otherwise add the user
|
||||
if [ ! -f /opt/so/conf/nginx/.htpasswd ]; then
|
||||
|
||||
# Create the password file
|
||||
htpasswd -c /opt/so/conf/nginx/.htpasswd $USERNAME
|
||||
|
||||
else
|
||||
|
||||
htpasswd /opt/so/conf/nginx/.htpasswd $USERNAME
|
||||
|
||||
fi
|
||||
so-user add $*
|
||||
37
salt/common/tools/sbin/soup
Normal file
37
salt/common/tools/sbin/soup
Normal file
@@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
clone_to_tmp() {
|
||||
|
||||
# TODO Need to add a air gap option
|
||||
# Make a temp location for the files
|
||||
rm -rf /tmp/soup
|
||||
mkdir -p /tmp/soup
|
||||
cd /tmp/soup
|
||||
#git clone -b dev https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
|
||||
git clone https://github.com/Security-Onion-Solutions/securityonion-saltstack.git
|
||||
|
||||
}
|
||||
|
||||
# Prompt the user that this requires internets
|
||||
|
||||
clone_to_tmp
|
||||
cd /tmp/soup/securityonion-saltstack/update
|
||||
chmod +x soup
|
||||
./soup
|
||||
|
||||
|
||||
@@ -34,8 +34,6 @@
|
||||
#fi
|
||||
|
||||
# Avoid starting multiple instances
|
||||
if pgrep -f "so-curator-closed-delete-delete" >/dev/null; then
|
||||
echo "Script is already running."
|
||||
else
|
||||
if ! pgrep -f "so-curator-closed-delete-delete" >/dev/null; then
|
||||
/usr/sbin/so-curator-closed-delete-delete
|
||||
fi
|
||||
|
||||
@@ -37,9 +37,11 @@
|
||||
},
|
||||
{ "rename": { "field": "module", "target_field": "event.module", "ignore_missing": true } },
|
||||
{ "rename": { "field": "dataset", "target_field": "event.dataset", "ignore_missing": true } },
|
||||
{ "rename": { "field": "category", "target_field": "event.category", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
|
||||
{
|
||||
"remove": {
|
||||
"field": [ "index_name_prefix"],
|
||||
"field": [ "index_name_prefix", "message2"],
|
||||
"ignore_failure": false
|
||||
}
|
||||
}
|
||||
|
||||
26
salt/elasticsearch/files/ingest/osquery.query_result
Normal file
26
salt/elasticsearch/files/ingest/osquery.query_result
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"description" : "osquery",
|
||||
"processors" : [
|
||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
||||
{
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"source": "def dict = ['result': new HashMap()]; for (entry in ctx['message2'].entrySet()) { dict['result'][entry.getKey()] = entry.getValue(); } ctx['osquery'] = dict; "
|
||||
}
|
||||
},
|
||||
{ "rename": { "field": "osquery.result.hostIdentifier", "target_field": "osquery.result.host_identifier", "ignore_missing": true } },
|
||||
{ "rename": { "field": "osquery.result.calendarTime", "target_field": "osquery.result.calendar_time", "ignore_missing": true } },
|
||||
{ "rename": { "field": "osquery.result.unixTime", "target_field": "osquery.result.unix_time", "ignore_missing": true } },
|
||||
{ "json": { "field": "message", "target_field": "message3", "ignore_failure": true } },
|
||||
{ "rename": { "field": "message3.columns.username", "target_field": "user.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message3.columns.uid", "target_field": "user.uid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message3.columns.gid", "target_field": "user.gid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message3.columns.shell", "target_field": "user.shell", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message3.columns.cmdline", "target_field": "process.command_line", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message3.columns.pid", "target_field": "process.pid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message3.columns.parent", "target_field": "process.ppid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message3.columns.cwd", "target_field": "process.working_directory", "ignore_missing": true } },
|
||||
{ "remove": { "field": [ "message3"], "ignore_failure": false } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
@@ -30,9 +30,9 @@
|
||||
{ "rename": { "field": "data.win.eventdata.user", "target_field": "user.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.system.eventID", "target_field": "event.code", "ignore_missing": true } },
|
||||
{ "rename": { "field": "predecoder.program_name", "target_field": "process.name", "ignore_missing": true } },
|
||||
{ "set": { "if": "ctx.rule.level == 1", "field": "category", "value": "None" } },
|
||||
{ "set": { "if": "ctx.rule.level == 2", "field": "category", "value": "System low priority notification" } },
|
||||
{ "set": { "if": "ctx.rule.level == 3", "field": "category", "value": "Successful/authorized event" } },
|
||||
{ "set": { "if": "ctx.rule.level == 1", "field": "rule.category", "value": "None" } },
|
||||
{ "set": { "if": "ctx.rule.level == 2", "field": "rule.category", "value": "System low priority notification" } },
|
||||
{ "set": { "if": "ctx.rule.level == 3", "field": "rule.category", "value": "Successful/authorized event" } },
|
||||
{ "set": { "if": "ctx.rule.level == 4", "field": "rule.category", "value": "System low priority error" } },
|
||||
{ "set": { "if": "ctx.rule.level == 5", "field": "rule.category", "value": "User generated error" } },
|
||||
{ "set": { "if": "ctx.rule.level == 6", "field": "rule.category", "value": "Low relevance attack" } },
|
||||
|
||||
@@ -1,54 +1,55 @@
|
||||
{
|
||||
"description" : "ossec",
|
||||
"processors" : [
|
||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
||||
{ "remove": { "field": [ "agent" ], "ignore_missing": true, "ignore_failure": false } },
|
||||
{ "rename": { "field": "message2.agent", "target_field": "agent", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.data", "target_field": "data", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.decoder", "target_field": "decoder", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.full_log", "target_field": "log.full", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.id", "target_field": "log.id.id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.location", "target_field": "location", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.manager", "target_field": "manager", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.predecoder", "target_field": "predecoder", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.timestamp", "target_field": "timestamp", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.agent", "target_field": "agent", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.data", "target_field": "data", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.decoder", "target_field": "decoder", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.full_log", "target_field": "log.full", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.id", "target_field": "log.id.id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.location", "target_field": "log.location", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.manager", "target_field": "manager", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.predecoder", "target_field": "predecoder", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.timestamp", "target_field": "event.timestamp", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.previous_log", "target_field": "log.previous_log", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.previous_output", "target_field": "log.previous_output", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.rule", "target_field": "rule", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.command", "target_field": "command", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.dstip", "target_field": "destination.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.dstport", "target_field": "destination.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.dstuser", "target_field": "user.escalated", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.srcip", "target_field": "source.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.destinationPort", "target_field": "destination.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.image", "target_field": "image_path", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.parentImage", "target_field": "parent_image_path", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.sourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.sourceIp", "target_field": "source_ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.sourcePort", "target_field": "source.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.targetFilename", "target_field": "file.target", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.user", "target_field": "user.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.system.eventID", "target_field": "event.code", "ignore_missing": true } },
|
||||
{ "rename": { "field": "predecoder.program_name", "target_field": "process.name", "ignore_missing": true } },
|
||||
{ "set": { "if": "ctx.rule.level == 1", "field": "rule.category", "value": "None" } },
|
||||
{ "set": { "if": "ctx.rule.level == 2", "field": "rule.category", "value": "System low priority notification" } },
|
||||
{ "set": { "if": "ctx.rule.level == 3", "field": "rule.category", "value": "Successful/authorized event" } },
|
||||
{ "set": { "if": "ctx.rule.level == 4", "field": "rule.category", "value": "System low priority error" } },
|
||||
{ "set": { "if": "ctx.rule.level == 5", "field": "rule.category", "value": "User generated error" } },
|
||||
{ "set": { "if": "ctx.rule.level == 6", "field": "rule.category", "value": "Low relevance attack" } },
|
||||
{ "set": { "if": "ctx.rule.level == 7", "field": "rule.category", "value": "\"Bad word\" matching" } },
|
||||
{ "set": { "if": "ctx.rule.level == 8", "field": "rule.category", "value": "First time seen" } },
|
||||
{ "set": { "if": "ctx.rule.level == 9", "field": "rule.category", "value": "Error from invalid source" } },
|
||||
{ "set": { "if": "ctx.rule.level == 10", "field": "rule.category", "value": "Multiple user generated errors" } },
|
||||
{ "set": { "if": "ctx.rule.level == 11", "field": "rule.category", "value": "Integrity checking warning" } },
|
||||
{ "set": { "if": "ctx.rule.level == 12", "field": "rule.category", "value": "High importance event" } },
|
||||
{ "set": { "if": "ctx.rule.level == 13", "field": "rule.category", "value": "Unusal error (high importance)" } },
|
||||
{ "set": { "if": "ctx.rule.level == 14", "field": "rule.category", "value": "High importance security event" } },
|
||||
{ "set": { "if": "ctx.rule.level == 15", "field": "rule.category", "value": "Severe attack" } },
|
||||
{ "append": { "if": "ctx.rule.level != null", "field": "tags", "value": ["alert"] } },
|
||||
{ "rename": { "field": "message2.rule", "target_field": "rule", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.syscheck", "target_field": "host.syscheck", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.command", "target_field": "process.command_line", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.dstip", "target_field": "destination.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.dstport", "target_field": "destination.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.dstuser", "target_field": "user.escalated", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.srcip", "target_field": "source.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.destinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.destinationIp", "target_field": "destination.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.destinationPort", "target_field": "destination.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.image", "target_field": "image_path", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.parentImage", "target_field": "parent_image_path", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.sourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.sourceIp", "target_field": "source_ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.sourcePort", "target_field": "source.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.targetFilename", "target_field": "file.target", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.eventdata.user", "target_field": "user.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "data.win.system.eventID", "target_field": "event.code", "ignore_missing": true } },
|
||||
{ "rename": { "field": "predecoder.program_name", "target_field": "process.name", "ignore_missing": true } },
|
||||
{ "set": { "if": "ctx.rule.level == 1", "field": "rule.category", "value": "None" } },
|
||||
{ "set": { "if": "ctx.rule.level == 2", "field": "rule.category", "value": "System low priority notification" } },
|
||||
{ "set": { "if": "ctx.rule.level == 3", "field": "rule.category", "value": "Successful/authorized event" } },
|
||||
{ "set": { "if": "ctx.rule.level == 4", "field": "rule.category", "value": "System low priority error" } },
|
||||
{ "set": { "if": "ctx.rule.level == 5", "field": "rule.category", "value": "User generated error" } },
|
||||
{ "set": { "if": "ctx.rule.level == 6", "field": "rule.category", "value": "Low relevance attack" } },
|
||||
{ "set": { "if": "ctx.rule.level == 7", "field": "rule.category", "value": "\"Bad word\" matching" } },
|
||||
{ "set": { "if": "ctx.rule.level == 8", "field": "rule.category", "value": "First time seen" } },
|
||||
{ "set": { "if": "ctx.rule.level == 9", "field": "rule.category", "value": "Error from invalid source" } },
|
||||
{ "set": { "if": "ctx.rule.level == 10", "field": "rule.category", "value": "Multiple user generated errors" } },
|
||||
{ "set": { "if": "ctx.rule.level == 11", "field": "rule.category", "value": "Integrity checking warning" } },
|
||||
{ "set": { "if": "ctx.rule.level == 12", "field": "rule.category", "value": "High importance event" } },
|
||||
{ "set": { "if": "ctx.rule.level == 13", "field": "rule.category", "value": "Unusal error (high importance)" } },
|
||||
{ "set": { "if": "ctx.rule.level == 14", "field": "rule.category", "value": "High importance security event" } },
|
||||
{ "set": { "if": "ctx.rule.level == 15", "field": "rule.category", "value": "Severe attack" } },
|
||||
{ "append": { "if": "ctx.rule.level != null", "field": "tags", "value": ["alert"] } },
|
||||
{ "remove": { "field": [ "predecoder", "decoder" ], "ignore_missing": true, "ignore_failure": false } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -3,12 +3,13 @@
|
||||
"processors" : [
|
||||
{ "rename":{ "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
|
||||
{ "rename":{ "field": "message2.flow_id", "target_field": "event.id", "ignore_failure": true } },
|
||||
{ "rename":{ "field": "message2.comunity_id", "target_field": "network.comunity_id", "ignore_failure": true } },
|
||||
{ "rename":{ "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
|
||||
{ "rename":{ "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
|
||||
{ "rename":{ "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
|
||||
{ "rename":{ "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
|
||||
{ "remove": { "field": ["message2", "agent"], "ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
|
||||
{ "set": { "field": "observer.name", "value": "{{agent.name}}" } },
|
||||
{ "remove": { "field": ["agent"], "ignore_failure": true } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -15,8 +15,9 @@
|
||||
{ "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
|
||||
{ "set": { "field": "server.port", "value": "{{destination.port}}" } },
|
||||
{ "set": { "field": "observer.name", "value": "{{agent.name}}" } },
|
||||
{ "date": { "field": "message2.ts", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "ignore_failure": true } },
|
||||
{ "remove": { "field": ["message2.ts", "path", "agent"], "ignore_failure": true } },
|
||||
{ "remove": { "field": ["agent"], "ignore_failure": true } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -2,10 +2,14 @@
|
||||
"description" : "zeek.weird",
|
||||
"processors" : [
|
||||
{ "remove": { "field": ["host"], "ignore_failure": true } },
|
||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.name", "target_field": "weird.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.addl", "target_field": "weird.additional_info", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.notice", "target_field": "weird.notice", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.peer", "target_field": "weird.peer", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.p", "target_field": "weird.p", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.dst", "target_field": "destination.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.src", "target_field": "source.ip", "ignore_missing": true } },
|
||||
{ "pipeline": { "name": "zeek.common" } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -8,8 +8,9 @@
|
||||
{%- set HOSTNAME = salt['grains.get']('host', '') %}
|
||||
{%- set BROVER = salt['pillar.get']('static:broversion', 'COMMUNITY') %}
|
||||
{%- set WAZUHENABLED = salt['pillar.get']('static:wazuh_enabled', '1') %}
|
||||
{%- set FLEETENABLED = salt['pillar.get']('static:fleet_enabled', '1') %}
|
||||
{%- set STRELKAENABLED = salt['pillar.get']('static:strelka_enabled', '1') %}
|
||||
{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
|
||||
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
|
||||
|
||||
name: {{ HOSTNAME }}
|
||||
|
||||
@@ -19,7 +20,7 @@ name: {{ HOSTNAME }}
|
||||
|
||||
# Sets log level. The default log level is info.
|
||||
# Available log levels are: error, warning, info, debug
|
||||
logging.level: error
|
||||
logging.level: warning
|
||||
|
||||
# Enable debug output for selected components. To enable all selectors use ["*"]
|
||||
# Other available selectors are "beat", "publish", "service"
|
||||
@@ -81,7 +82,8 @@ filebeat.inputs:
|
||||
- /nsm/zeek/logs/current/{{ LOGNAME }}.log
|
||||
fields:
|
||||
module: zeek
|
||||
dataset: {{ LOGNAME }}
|
||||
dataset: {{ LOGNAME }}
|
||||
category: network
|
||||
processors:
|
||||
- drop_fields:
|
||||
fields: ["source", "prospector", "input", "offset", "beat"]
|
||||
@@ -99,6 +101,7 @@ filebeat.inputs:
|
||||
fields:
|
||||
module: suricata
|
||||
dataset: alert
|
||||
category: network
|
||||
|
||||
processors:
|
||||
- drop_fields:
|
||||
@@ -117,7 +120,7 @@ filebeat.inputs:
|
||||
fields:
|
||||
module: ossec
|
||||
dataset: alert
|
||||
|
||||
category: host
|
||||
processors:
|
||||
- drop_fields:
|
||||
fields: ["source", "prospector", "input", "offset", "beat"]
|
||||
@@ -137,13 +140,15 @@ filebeat.inputs:
|
||||
|
||||
{%- endif %}
|
||||
|
||||
{%- if FLEETENABLED == '1' %}
|
||||
{%- if FLEETMASTER or FLEETNODE %}
|
||||
|
||||
- type: log
|
||||
paths:
|
||||
- /osquery/logs/result.log
|
||||
- /nsm/osquery/fleet/result.log
|
||||
fields:
|
||||
type: osquery
|
||||
module: osquery
|
||||
dataset: query_result
|
||||
category: host
|
||||
|
||||
processors:
|
||||
- drop_fields:
|
||||
@@ -159,9 +164,10 @@ filebeat.inputs:
|
||||
|
||||
- type: log
|
||||
paths:
|
||||
- /opt/so/log/strelka/strelka.log
|
||||
- /nsm/strelka/log/strelka.log
|
||||
fields:
|
||||
module: strelka
|
||||
category: file
|
||||
dataset: file
|
||||
|
||||
processors:
|
||||
@@ -173,7 +179,8 @@ filebeat.inputs:
|
||||
close_removed: false
|
||||
|
||||
{%- endif %}
|
||||
#----------------------------- Logstash output ---------------------------------
|
||||
#----------------------------- Elasticsearch/Logstash output ---------------------------------
|
||||
{%- if grains['role'] == "so-eval" %}
|
||||
output.elasticsearch:
|
||||
enabled: true
|
||||
hosts: ["{{ MASTER }}:9200"]
|
||||
@@ -189,13 +196,22 @@ output.elasticsearch:
|
||||
- index: "so-ossec-%{+yyyy.MM.dd}"
|
||||
when.contains:
|
||||
module: "ossec"
|
||||
- index: "so-osquery-%{+yyyy.MM.dd}"
|
||||
when.contains:
|
||||
module: "osquery"
|
||||
- index: "so-strelka-%{+yyyy.MM.dd}"
|
||||
when.contains:
|
||||
module: "strelka"
|
||||
|
||||
#output.logstash:
|
||||
setup.template.enabled: false
|
||||
{%- else %}
|
||||
|
||||
output.logstash:
|
||||
# Boolean flag to enable or disable the output module.
|
||||
#enabled: true
|
||||
enabled: true
|
||||
|
||||
# The Logstash hosts
|
||||
#hosts: ["{{ MASTER }}:5644"]
|
||||
hosts: ["{{ MASTER }}:5644"]
|
||||
|
||||
# Number of workers per Logstash host.
|
||||
#worker: 1
|
||||
@@ -210,21 +226,21 @@ output.elasticsearch:
|
||||
# and certificates will be accepted. In this mode, SSL based connections are
|
||||
# susceptible to man-in-the-middle attacks. Use only for testing. Default is
|
||||
# `full`.
|
||||
#ssl.verification_mode: full
|
||||
ssl.verification_mode: full
|
||||
|
||||
# List of supported/valid TLS versions. By default all TLS versions 1.0 up to
|
||||
# 1.2 are enabled.
|
||||
#ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2]
|
||||
|
||||
# Optional SSL configuration options. SSL is off by default.
|
||||
# List of root certificates for HTTPS server verifications
|
||||
#ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
|
||||
ssl.certificate_authorities: ["/usr/share/filebeat/intraca.crt"]
|
||||
|
||||
# Certificate for SSL client authentication
|
||||
#ssl.certificate: "/usr/share/filebeat/filebeat.crt"
|
||||
ssl.certificate: "/usr/share/filebeat/filebeat.crt"
|
||||
|
||||
# Client Certificate Key
|
||||
#ssl.key: "/usr/share/filebeat/filebeat.key"
|
||||
ssl.key: "/usr/share/filebeat/filebeat.key"
|
||||
|
||||
setup.template.enabled: false
|
||||
# A dictionary of settings to place into the settings.index dictionary
|
||||
@@ -239,7 +255,7 @@ setup.template.enabled: false
|
||||
# https://www.elastic.co/guide/en/elasticsearch/reference/current/mapping-source-field.html
|
||||
#_source:
|
||||
#enabled: false
|
||||
|
||||
{%- endif %}
|
||||
#============================== Kibana =====================================
|
||||
|
||||
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
@@ -56,10 +56,11 @@ so-filebeat:
|
||||
- /opt/so/log/filebeat:/usr/share/filebeat/logs:rw
|
||||
- /opt/so/conf/filebeat/etc/filebeat.yml:/usr/share/filebeat/filebeat.yml:ro
|
||||
- /nsm/zeek:/nsm/zeek:ro
|
||||
- /nsm/strelka/log:/nsm/strelka/log:ro
|
||||
- /opt/so/log/suricata:/suricata:ro
|
||||
- /opt/so/wazuh/logs/alerts:/wazuh/alerts:ro
|
||||
- /opt/so/wazuh/logs/archives:/wazuh/archives:ro
|
||||
- /opt/so/log/fleet/:/osquery/logs:ro
|
||||
- /nsm/osquery/fleet/:/nsm/osquery/fleet:ro
|
||||
- /opt/so/conf/filebeat/etc/pki/filebeat.crt:/usr/share/filebeat/filebeat.crt:ro
|
||||
- /opt/so/conf/filebeat/etc/pki/filebeat.key:/usr/share/filebeat/filebeat.key:ro
|
||||
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
|
||||
|
||||
@@ -5,7 +5,10 @@
|
||||
{%- set ip = salt['pillar.get']('node:mainip', '') %}
|
||||
{%- elif grains['role'] == 'so-sensor' %}
|
||||
{%- set ip = salt['pillar.get']('sensor:mainip', '') %}
|
||||
{%- elif grains['role'] == 'so-fleet' %}
|
||||
{%- set ip = salt['pillar.get']('node:mainip', '') %}
|
||||
{%- endif %}
|
||||
|
||||
# Quick Fix for Docker being difficult
|
||||
iptables_fix_docker:
|
||||
iptables.chain_present:
|
||||
@@ -232,14 +235,14 @@ enable_masternode_mysql_3306_{{ip}}:
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
enable_master_osquery_8080_{{ip}}:
|
||||
enable_master_osquery_8090_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 8080
|
||||
- dport: 8090
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
@@ -466,14 +469,14 @@ enable_standard_beats_5044_{{ip}}:
|
||||
# Allow OSQuery Endpoints to send their traffic
|
||||
{% for ip in pillar.get('osquery_endpoint') %}
|
||||
|
||||
enable_standard_osquery_8080_{{ip}}:
|
||||
enable_standard_osquery_8090_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 8080
|
||||
- dport: 8090
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
@@ -702,3 +705,108 @@ enable_forwardnode_beats_5644_{{ip}}:
|
||||
- position: 1
|
||||
- save: True
|
||||
{% endif %}
|
||||
|
||||
|
||||
# Rules if you are a Standalone Fleet node
|
||||
{% if grains['role'] == 'so-fleet' %}
|
||||
#This should be more granular
|
||||
iptables_allow_fleetnode_docker:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: INPUT
|
||||
- jump: ACCEPT
|
||||
- source: 172.17.0.0/24
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
# Allow Redis
|
||||
enable_fleetnode_redis_6379_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 6379
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
enable_fleetnode_mysql_3306_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 3306
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
enable_fleet_osquery_8080_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 8080
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
|
||||
enable_fleetnodetemp_mysql_3306_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: 127.0.0.1
|
||||
- dport: 3306
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
enable_fleettemp_osquery_8080_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: 127.0.0.1
|
||||
- dport: 8080
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
|
||||
# Allow Analysts to access Fleet WebUI
|
||||
{% for ip in pillar.get('analyst') %}
|
||||
|
||||
enable_fleetnode_fleet_443_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 443
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
{% endfor %}
|
||||
|
||||
# Needed for osquery endpoints to checkin to Fleet API for mgt
|
||||
{% for ip in pillar.get('osquery_endpoint') %}
|
||||
|
||||
enable_fleetnode_8090_{{ip}}:
|
||||
iptables.insert:
|
||||
- table: filter
|
||||
- chain: DOCKER-USER
|
||||
- jump: ACCEPT
|
||||
- proto: tcp
|
||||
- source: {{ ip }}
|
||||
- dport: 8090
|
||||
- position: 1
|
||||
- save: True
|
||||
|
||||
{% endfor %}
|
||||
|
||||
{% endif %}
|
||||
11
salt/fleet/event_enable-fleet.sls
Normal file
11
salt/fleet/event_enable-fleet.sls
Normal file
@@ -0,0 +1,11 @@
|
||||
{% set ENROLLSECRET = salt['cmd.run']('docker exec so-fleet fleetctl get enroll-secret') %}
|
||||
{%- set MAINIP = salt['pillar.get']('node:mainip') -%}
|
||||
|
||||
so/fleet:
|
||||
event.send:
|
||||
- data:
|
||||
action: 'enablefleet'
|
||||
hostname: {{ grains.host }}
|
||||
mainip: {{ MAINIP }}
|
||||
role: {{ grains.role }}
|
||||
enroll-secret: {{ ENROLLSECRET }}
|
||||
10
salt/fleet/event_gen-packages.sls
Normal file
10
salt/fleet/event_gen-packages.sls
Normal file
@@ -0,0 +1,10 @@
|
||||
{% set ENROLLSECRET = salt['pillar.get']('auth:fleet_enroll-secret') %}
|
||||
|
||||
so/fleet:
|
||||
event.send:
|
||||
- data:
|
||||
action: 'genpackages'
|
||||
hostname: {{ grains.host }}
|
||||
role: {{ grains.role }}
|
||||
mainip: {{ grains.host }}
|
||||
enroll-secret: {{ ENROLLSECRET }}
|
||||
@@ -1,3 +1,5 @@
|
||||
{%- set PACKAGESTS = salt['pillar.get']('static:fleet_packages-timestamp:', 'N/A') -%}
|
||||
|
||||
<!DOCTYPE html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
@@ -75,23 +77,16 @@ a {
|
||||
<body>
|
||||
<div class="center-content">
|
||||
<div class="topnav center-content">
|
||||
<a href="/so-auth/loginpage/create-user" target="_blank">Create New User</a>
|
||||
<a href="/kibana/" target="_blank">Kibana</a>
|
||||
<a href="/grafana/" target="_blank">Grafana</a>
|
||||
<a href="/sensoroni/" target="_blank">Sensoroni</a>
|
||||
<a href="/playbook/" target="_blank">Playbook</a>
|
||||
<a href="/fleet/" target="_blank">Fleet</a>
|
||||
<a href="/thehive/" target="_blank">TheHive</a>
|
||||
<a href="/packages/" target="_blank">Osquery Packages</a>
|
||||
<a href="https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/FAQ" target="_blank">FAQ</a>
|
||||
<a href="https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/Configuring-Osquery-with-Security-Onion" target="_blank">Osquery/Fleet Docs</a>
|
||||
<a href="https://www.securityonionsolutions.com" target="_blank">Security Onion Solutions</a>
|
||||
<a href="https://blog.securityonion.net" target="_blank">Blog</a>
|
||||
</div>
|
||||
|
||||
<div class="content center-content">
|
||||
<p>
|
||||
<div style="text-align: center;">
|
||||
<h1>Osquery Packages</h1>
|
||||
<div style="text-align: center;">
|
||||
<h1>Security Onion - Dedicated Fleet Node</h1>
|
||||
<h2>Osquery Packages</h2>
|
||||
</div>
|
||||
<br/>
|
||||
<h2>Notes</h2>
|
||||
@@ -102,21 +97,22 @@ a {
|
||||
<br/>
|
||||
<h2>Downloads</h2>
|
||||
<div>
|
||||
Generated: N/A
|
||||
Generated: {{ PACKAGESTS }}
|
||||
<br/>
|
||||
<br/>
|
||||
Packages:
|
||||
<ul>
|
||||
<li><a href="/packages/launcher.msi" download="msi-launcher.msi">MSI (Windows)</a></li>
|
||||
<li><a href="/packages/launcher.deb" download="deb-launcher.deb">DEB (Debian)</a></li>
|
||||
<li><a href="/packages/launcher.rpm" download="rpm-launcher.rpm">RPM (RPM)</a></li>
|
||||
<li><a href="/launcher.msi" download="msi-launcher.msi">MSI (Windows)</a></li>
|
||||
<li><a href="/launcher.deb" download="deb-launcher.deb">DEB (Debian)</a></li>
|
||||
<li><a href="/launcher.rpm" download="rpm-launcher.rpm">RPM (RPM)</a></li>
|
||||
<li><a href="/launcher.pkg" download="pkg-launcher.pkg">PKG (MacOS)</a></li>
|
||||
</ul>
|
||||
<br/>
|
||||
<br/>
|
||||
Config Files:
|
||||
<ul>
|
||||
<li><a href="/packages/launcher.flags" download="launcher.flags.txt">RPM & DEB Flag File</a></li>
|
||||
<li><a href="/packages/launcher-msi.flags" download="launcher-msi.flags.txt">MSI Flag File</a></li>
|
||||
<li><a href="/launcher.flags" download="launcher.flags.txt">RPM & DEB Flag File</a></li>
|
||||
<li><a href="/launcher-msi.flags" download="launcher-msi.flags.txt">MSI Flag File</a></li>
|
||||
</ul>
|
||||
</div>
|
||||
<br/>
|
||||
@@ -5,11 +5,11 @@ spec:
|
||||
decorators:
|
||||
always:
|
||||
- SELECT codename FROM os_version;
|
||||
- SELECT uuid AS LiveQuery FROM system_info;
|
||||
- SELECT address AS EndpointIP1 FROM interface_addresses where address not
|
||||
- SELECT uuid AS live_query FROM system_info;
|
||||
- SELECT address AS endpoint_ip1 FROM interface_addresses where address not
|
||||
like '%:%' and address not like '127%' and address not like '169%' order by
|
||||
interface desc limit 1;
|
||||
- SELECT address AS EndpointIP2 FROM interface_addresses where address not
|
||||
- SELECT address AS endpoint_ip2 FROM interface_addresses where address not
|
||||
like '%:%' and address not like '127%' and address not like '169%' order by
|
||||
interface asc limit 1;
|
||||
- SELECT hardware_serial FROM system_info;
|
||||
@@ -239,9 +239,10 @@ spec:
|
||||
query: chrome_extensions
|
||||
- description: Disk encryption status and information.
|
||||
interval: 3600
|
||||
name: disk_encryption
|
||||
name: disk_encryption_snapshot
|
||||
platform: darwin
|
||||
query: disk_encryption
|
||||
query: disk_encryption_snapshot
|
||||
snapshot: true
|
||||
- description: Local system users.
|
||||
interval: 28800
|
||||
name: users_snapshot
|
||||
@@ -282,6 +283,12 @@ spec:
|
||||
name: sip_config
|
||||
platform: darwin
|
||||
query: sip_config
|
||||
- description: Shows information about the wifi network that a host is currently connected to.
|
||||
interval: 28800
|
||||
name: wifi_status_snapshot
|
||||
platform: darwin
|
||||
query: wifi_status_snapshot
|
||||
snapshot: true
|
||||
- description: Returns the private keys in the users ~/.ssh directory and whether
|
||||
or not they are encrypted.
|
||||
interval: 3600
|
||||
@@ -290,7 +297,8 @@ spec:
|
||||
query: user_ssh_keys
|
||||
removed: false
|
||||
targets:
|
||||
labels: null
|
||||
labels:
|
||||
- macOS
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -365,13 +373,10 @@ spec:
|
||||
description: Returns information about installed event taps. Can be used to detect
|
||||
keyloggers
|
||||
name: event_taps
|
||||
query: SELECT * FROM event_taps INNER JOIN processes ON event_taps.tapping_process
|
||||
= processes.pid WHERE event_tapped NOT LIKE '%mouse%' AND processes.path NOT LIKE
|
||||
'%.app%' AND processes.path!='/Library/Application Support/org.pqrs/Karabiner-Elements/bin/karabiner_grabber'
|
||||
AND processes.path NOT LIKE '/Users/%/bin/kwm' AND processes.path!='/Library/Rapport/bin/rooksd'
|
||||
AND processes.path!='/usr/sbin/universalaccessd' AND processes.path NOT LIKE '/usr/local/Cellar/%'
|
||||
AND processes.path NOT LIKE '/System/Library/%' AND processes.path NOT LIKE '%/steamapps/%'
|
||||
AND event_taps.enabled=1;
|
||||
query: SELECT * FROM event_taps INNER JOIN processes ON event_taps.tapping_process = processes.pid
|
||||
WHERE event_tapped NOT LIKE '%mouse%' AND processes.path NOT IN ('/usr/libexec/airportd',
|
||||
'/usr/sbin/universalaccessd') AND processes.path NOT LIKE '/System/Library/%' AND processes.path
|
||||
NOT LIKE '%/steamapps/%' AND processes.path NOT LIKE '%.app%' AND event_taps.enabled=1;
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -455,6 +460,13 @@ spec:
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Shows information about the wifi network that a host is currently connected to.
|
||||
name: wifi_status_snapshot
|
||||
query: SELECT * FROM wifi_status;
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Snapshot query for macosx_kextstat
|
||||
name: macosx_kextstat_snapshot
|
||||
@@ -479,7 +491,7 @@ kind: query
|
||||
spec:
|
||||
description: Safari browser extension details for all users.
|
||||
name: safari_extensions
|
||||
query: SELECT * FROM users JOIN safari_extensions USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN safari_extensions USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -500,7 +512,7 @@ kind: query
|
||||
spec:
|
||||
description: List authorized_keys for each user on the system
|
||||
name: authorized_keys
|
||||
query: SELECT * FROM users JOIN authorized_keys USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN authorized_keys USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -508,7 +520,7 @@ spec:
|
||||
description: Application, System, and Mobile App crash logs.
|
||||
name: crashes
|
||||
query: SELECT uid, datetime, responsible, exception_type, identifier, version, crash_path
|
||||
FROM users JOIN crashes USING (uid);
|
||||
FROM users CROSS JOIN crashes USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -516,7 +528,7 @@ spec:
|
||||
description: Displays the percentage of free space available on the primary disk
|
||||
partition
|
||||
name: disk_free_space_pct
|
||||
query: SELECT (blocks_available * 100 / blocks) AS pct FROM mounts WHERE device='/dev/disk1';
|
||||
query: SELECT (blocks_available * 100 / blocks) AS pct FROM mounts WHERE device='/dev/disk1s1';
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -553,7 +565,7 @@ kind: query
|
||||
spec:
|
||||
description: Snapshot query for Chrome extensions
|
||||
name: chrome_extensions_snapshot
|
||||
query: SELECT * FROM users JOIN chrome_extensions USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN chrome_extensions USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -589,14 +601,14 @@ kind: query
|
||||
spec:
|
||||
description: All C/NPAPI browser plugin details for all users.
|
||||
name: browser_plugins
|
||||
query: SELECT * FROM users JOIN browser_plugins USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN browser_plugins USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: List installed Firefox addons for all users
|
||||
name: firefox_addons
|
||||
query: SELECT * FROM users JOIN firefox_addons USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN firefox_addons USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -625,13 +637,13 @@ kind: query
|
||||
spec:
|
||||
description: List installed Chrome Extensions for all users
|
||||
name: chrome_extensions
|
||||
query: SELECT * FROM users JOIN chrome_extensions USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN chrome_extensions USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Disk encryption status and information.
|
||||
name: disk_encryption
|
||||
name: disk_encryption_snapshot
|
||||
query: SELECT * FROM disk_encryption;
|
||||
---
|
||||
apiVersion: v1
|
||||
@@ -691,4 +703,4 @@ spec:
|
||||
description: Returns the private keys in the users ~/.ssh directory and whether
|
||||
or not they are encrypted.
|
||||
name: user_ssh_keys
|
||||
query: SELECT * FROM users JOIN user_ssh_keys USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN user_ssh_keys USING (uid);
|
||||
@@ -227,8 +227,35 @@ spec:
|
||||
platform: windows
|
||||
query: scheduled_tasks_snapshot
|
||||
snapshot: true
|
||||
- description: Appcompat shims (.sdb files) installed on Windows hosts.
|
||||
interval: 3600
|
||||
name: appcompat_shims
|
||||
platform: windows
|
||||
query: appcompat_shims
|
||||
- description: Disk encryption status and information snapshot query.
|
||||
interval: 28800
|
||||
name: bitlocker_info_snapshot
|
||||
platform: windows
|
||||
query: bitlocker_info_snapshot
|
||||
snapshot: true
|
||||
targets:
|
||||
labels: null
|
||||
labels:
|
||||
- MS Windows
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Appcompat shims (.sdb files) installed on Windows hosts.
|
||||
name: appcompat_shims
|
||||
query: SELECT * FROM appcompat_shims WHERE description!='EMET_Database' AND
|
||||
executable NOT IN ('setuphost.exe','setupprep.exe','iisexpress.exe');
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Disk encryption status and information snapshot query.
|
||||
name: bitlocker_info_snapshot
|
||||
query: SELECT * FROM bitlocker_info;
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -302,7 +329,7 @@ kind: query
|
||||
spec:
|
||||
description: Snapshot query for Chrome extensions
|
||||
name: chrome_extensions_snapshot
|
||||
query: SELECT * FROM users JOIN chrome_extensions USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN chrome_extensions USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -466,7 +493,7 @@ kind: query
|
||||
spec:
|
||||
description: List installed Chrome Extensions for all users
|
||||
name: chrome_extensions
|
||||
query: SELECT * FROM users JOIN chrome_extensions USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN chrome_extensions USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -3,17 +3,9 @@ kind: options
|
||||
spec:
|
||||
config:
|
||||
decorators:
|
||||
always:
|
||||
load:
|
||||
- SELECT uuid AS host_uuid FROM system_info;
|
||||
- SELECT hostname AS hostname FROM system_info;
|
||||
- SELECT codename FROM os_version;
|
||||
- SELECT uuid AS LiveQuery FROM system_info;
|
||||
- SELECT address AS EndpointIP1 FROM interface_addresses where address not
|
||||
like '%:%' and address not like '127%' and address not like '169%' order by
|
||||
interface desc limit 1;
|
||||
- SELECT address AS EndpointIP2 FROM interface_addresses where address not
|
||||
like '%:%' and address not like '127%' and address not like '169%' order by
|
||||
interface asc limit 1;
|
||||
- SELECT hardware_serial FROM system_info;
|
||||
file_paths:
|
||||
binaries:
|
||||
- /usr/bin/%%
|
||||
@@ -29,7 +21,6 @@ spec:
|
||||
efi:
|
||||
- /System/Library/CoreServices/boot.efi
|
||||
options:
|
||||
decorations_top_level: true
|
||||
disable_distributed: false
|
||||
disable_tables: windows_events
|
||||
distributed_interval: 10
|
||||
@@ -26,7 +26,9 @@ spec:
|
||||
query: backup_tool_perf
|
||||
snapshot: true
|
||||
targets:
|
||||
labels: null
|
||||
labels:
|
||||
- MS Windows
|
||||
- macOS
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -26,7 +26,9 @@ spec:
|
||||
platform: windows
|
||||
query: endpoint_security_tool_backend_server_registry_misconfigured
|
||||
targets:
|
||||
labels: null
|
||||
labels:
|
||||
- MS Windows
|
||||
- macOS
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -40,7 +40,8 @@ spec:
|
||||
platform: windows
|
||||
query: uac_settings_registry
|
||||
targets:
|
||||
labels: null
|
||||
labels:
|
||||
- MS Windows
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -137,7 +137,8 @@ spec:
|
||||
platform: windows
|
||||
query: send_error_alert_registry
|
||||
targets:
|
||||
labels: null
|
||||
labels:
|
||||
- MS Windows
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -185,7 +185,8 @@ spec:
|
||||
platform: windows
|
||||
query: send_error_alert_registry_exists
|
||||
targets:
|
||||
labels: null
|
||||
labels:
|
||||
- MS Windows
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -5,254 +5,251 @@ spec:
|
||||
name: LinuxPack
|
||||
queries:
|
||||
- description: Retrieves all the jobs scheduled in crontab in the target system.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: crontab_snapshot
|
||||
platform: linux
|
||||
query: crontab_snapshot
|
||||
snapshot: true
|
||||
- description: Various Linux kernel integrity checked attributes.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: kernel_integrity
|
||||
platform: linux
|
||||
query: kernel_integrity
|
||||
- description: Linux kernel modules both loaded and within the load search path.
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: kernel_modules
|
||||
platform: linux
|
||||
query: kernel_modules
|
||||
- description: Retrieves the current list of mounted drives in the target system.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: mounts
|
||||
platform: linux
|
||||
query: mounts
|
||||
- description: The percentage of total CPU time (system+user) consumed by osqueryd
|
||||
interval: 0
|
||||
name: osquery_cpu_pct
|
||||
platform: linux
|
||||
query: osquery_cpu_pct
|
||||
snapshot: true
|
||||
- description: Socket events collected from the audit framework
|
||||
interval: 0
|
||||
interval: 10
|
||||
name: socket_events
|
||||
platform: linux
|
||||
query: socket_events
|
||||
- description: Record the network interfaces and their associated IP and MAC addresses
|
||||
interval: 0
|
||||
interval: 600
|
||||
name: network_interfaces_snapshot
|
||||
platform: linux
|
||||
query: network_interfaces_snapshot
|
||||
snapshot: true
|
||||
version: 1.4.5
|
||||
- description: Information about the running osquery configuration
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: osquery_info
|
||||
platform: linux
|
||||
query: osquery_info
|
||||
snapshot: true
|
||||
- description: Display all installed RPM packages
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: rpm_packages
|
||||
platform: centos
|
||||
query: rpm_packages
|
||||
snapshot: true
|
||||
- description: Record shell history for all users on system (instead of just root)
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: shell_history
|
||||
platform: linux
|
||||
query: shell_history
|
||||
- description: File events collected from file integrity monitoring
|
||||
interval: 0
|
||||
interval: 10
|
||||
name: file_events
|
||||
platform: linux
|
||||
query: file_events
|
||||
removed: false
|
||||
- description: Retrieve the EC2 metadata for this endpoint
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: ec2_instance_metadata
|
||||
platform: linux
|
||||
query: ec2_instance_metadata
|
||||
- description: Retrieve the EC2 tags for this endpoint
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: ec2_instance_tags
|
||||
platform: linux
|
||||
query: ec2_instance_tags
|
||||
- description: Snapshot query to retrieve the EC2 tags for this instance
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: ec2_instance_tags_snapshot
|
||||
platform: linux
|
||||
query: ec2_instance_tags_snapshot
|
||||
snapshot: true
|
||||
- description: Retrieves the current filters and chains per filter in the target
|
||||
system.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: iptables
|
||||
platform: linux
|
||||
query: iptables
|
||||
- description: Display any SUID binaries that are owned by root
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: suid_bin
|
||||
platform: linux
|
||||
query: suid_bin
|
||||
- description: Display all installed DEB packages
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: deb_packages
|
||||
platform: ubuntu
|
||||
query: deb_packages
|
||||
snapshot: true
|
||||
- description: Find shell processes that have open sockets
|
||||
interval: 0
|
||||
interval: 600
|
||||
name: behavioral_reverse_shell
|
||||
platform: linux
|
||||
query: behavioral_reverse_shell
|
||||
- description: Retrieves all the jobs scheduled in crontab in the target system.
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: crontab
|
||||
platform: linux
|
||||
query: crontab
|
||||
- description: Records the system resources used by each query
|
||||
interval: 0
|
||||
name: per_query_perf
|
||||
platform: linux
|
||||
query: per_query_perf
|
||||
- description: Records avg rate of socket events since daemon started
|
||||
interval: 0
|
||||
name: socket_rates
|
||||
platform: linux
|
||||
query: socket_rates
|
||||
snapshot: true
|
||||
- description: Local system users.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: users
|
||||
platform: linux
|
||||
query: users
|
||||
- description: Process events collected from the audit framework
|
||||
interval: 0
|
||||
interval: 10
|
||||
name: process_events
|
||||
platform: linux
|
||||
query: process_events
|
||||
- description: Retrieves the list of the latest logins with PID, username and timestamp.
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: last
|
||||
platform: linux
|
||||
query: last
|
||||
- description: Any processes that run with an LD_PRELOAD environment variable
|
||||
interval: 0
|
||||
interval: 60
|
||||
name: ld_preload
|
||||
platform: linux
|
||||
query: ld_preload
|
||||
- description: Records avg rate of process events since daemon started
|
||||
interval: 0
|
||||
name: process_rates
|
||||
platform: linux
|
||||
query: process_rates
|
||||
snapshot: true
|
||||
- description: Information about the system hardware and name
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: system_info
|
||||
platform: linux
|
||||
query: system_info
|
||||
snapshot: true
|
||||
- description: Returns the private keys in the users ~/.ssh directory and whether
|
||||
or not they are encrypted
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: user_ssh_keys
|
||||
platform: linux
|
||||
query: user_ssh_keys
|
||||
- description: Local system users.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: users_snapshot
|
||||
platform: linux
|
||||
query: users_snapshot
|
||||
snapshot: true
|
||||
- description: DNS resolvers used by the host
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: dns_resolvers
|
||||
platform: linux
|
||||
query: dns_resolvers
|
||||
- description: Retrieves information from the current kernel in the target system.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: kernel_info
|
||||
platform: linux
|
||||
query: kernel_info
|
||||
snapshot: true
|
||||
- description: Linux kernel modules both loaded and within the load search path.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: kernel_modules_snapshot
|
||||
platform: linux
|
||||
query: kernel_modules_snapshot
|
||||
snapshot: true
|
||||
- description: Generates an event if ld.so.preload is present - used by rootkits
|
||||
such as Jynx
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: ld_so_preload_exists
|
||||
platform: linux
|
||||
query: ld_so_preload_exists
|
||||
snapshot: true
|
||||
- description: Records system/user time, db size, and many other system metrics
|
||||
interval: 0
|
||||
interval: 1800
|
||||
name: runtime_perf
|
||||
platform: linux
|
||||
query: runtime_perf
|
||||
- description: Retrieves all the entries in the target system /etc/hosts file.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: etc_hosts_snapshot
|
||||
platform: linux
|
||||
query: etc_hosts_snapshot
|
||||
snapshot: true
|
||||
- description: Snapshot query to retrieve the EC2 metadata for this endpoint
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: ec2_instance_metadata_snapshot
|
||||
platform: linux
|
||||
query: ec2_instance_metadata_snapshot
|
||||
snapshot: true
|
||||
- description: ""
|
||||
interval: 0
|
||||
interval: 10
|
||||
name: hardware_events
|
||||
platform: linux
|
||||
query: hardware_events
|
||||
removed: false
|
||||
- description: Information about memory usage on the system
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: memory_info
|
||||
platform: linux
|
||||
query: memory_info
|
||||
- description: Displays information from /proc/stat file about the time the CPU
|
||||
cores spent in different parts of the system
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: cpu_time
|
||||
platform: linux
|
||||
query: cpu_time
|
||||
- description: Retrieves all the entries in the target system /etc/hosts file.
|
||||
interval: 0
|
||||
interval: 3600
|
||||
name: etc_hosts
|
||||
platform: linux
|
||||
query: etc_hosts
|
||||
- description: Retrieves information from the Operating System where osquery is
|
||||
currently running.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: os_version
|
||||
platform: linux
|
||||
query: os_version
|
||||
snapshot: true
|
||||
- description: A snapshot of all processes running on the host. Useful for outlier
|
||||
analysis.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: processes_snapshot
|
||||
platform: linux
|
||||
query: processes_snapshot
|
||||
snapshot: true
|
||||
- description: Retrieves the current list of USB devices in the target system.
|
||||
interval: 0
|
||||
interval: 120
|
||||
name: usb_devices
|
||||
platform: linux
|
||||
query: usb_devices
|
||||
- description: A line-delimited authorized_keys table.
|
||||
interval: 0
|
||||
interval: 86400
|
||||
name: authorized_keys
|
||||
platform: linux
|
||||
query: authorized_keys
|
||||
- description: Display apt package manager sources.
|
||||
interval: 86400
|
||||
name: apt_sources
|
||||
platform: ubuntu
|
||||
query: apt_sources
|
||||
snapshot: true
|
||||
- description: Gather information about processes that are listening on a socket.
|
||||
interval: 86400
|
||||
name: listening_ports
|
||||
platform: linux
|
||||
query: listening_ports
|
||||
snapshot: true
|
||||
- description: Display yum package manager sources.
|
||||
interval: 86400
|
||||
name: yum_sources
|
||||
platform: centos
|
||||
query: yum_sources
|
||||
snapshot: true
|
||||
targets:
|
||||
labels: null
|
||||
labels:
|
||||
- Ubuntu Linux
|
||||
- CentOS Linux
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -284,15 +281,6 @@ spec:
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: The percentage of total CPU time (system+user) consumed by osqueryd
|
||||
name: osquery_cpu_pct
|
||||
query: SELECT ((osqueryd_time*100)/(SUM(system_time) + SUM(user_time))) AS pct FROM
|
||||
processes, (SELECT (SUM(processes.system_time)+SUM(processes.user_time)) AS osqueryd_time
|
||||
FROM processes WHERE name='osqueryd');
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Socket events collected from the audit framework
|
||||
name: socket_events
|
||||
@@ -329,7 +317,7 @@ kind: query
|
||||
spec:
|
||||
description: Record shell history for all users on system (instead of just root)
|
||||
name: shell_history
|
||||
query: SELECT * FROM users JOIN shell_history USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN shell_history USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -403,23 +391,6 @@ spec:
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Records the system resources used by each query
|
||||
name: per_query_perf
|
||||
query: SELECT name, interval, executions, output_size, wall_time, (user_time/executions)
|
||||
AS avg_user_time, (system_time/executions) AS avg_system_time, average_memory
|
||||
FROM osquery_schedule;
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Records avg rate of socket events since daemon started
|
||||
name: socket_rates
|
||||
query: SELECT COUNT(1) AS num, count(1)/s AS rate FROM socket_events, (SELECT (julianday('now')
|
||||
- 2440587.5)*86400.0 - start_time AS s FROM osquery_info LIMIT 1);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Local system users.
|
||||
name: users
|
||||
@@ -454,14 +425,6 @@ spec:
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Records avg rate of process events since daemon started
|
||||
name: process_rates
|
||||
query: SELECT COUNT(1) AS num, count(1)/s AS rate FROM process_events, (SELECT (julianday('now')
|
||||
- 2440587.5)*86400.0 - start_time AS s FROM osquery_info LIMIT 1);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Information about the system hardware and name
|
||||
name: system_info
|
||||
@@ -473,7 +436,7 @@ spec:
|
||||
description: Returns the private keys in the users ~/.ssh directory and whether
|
||||
or not they are encrypted
|
||||
name: user_ssh_keys
|
||||
query: SELECT * FROM users JOIN user_ssh_keys USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN user_ssh_keys USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
@@ -593,4 +556,25 @@ kind: query
|
||||
spec:
|
||||
description: A line-delimited authorized_keys table.
|
||||
name: authorized_keys
|
||||
query: SELECT * FROM users JOIN authorized_keys USING (uid);
|
||||
query: SELECT * FROM users CROSS JOIN authorized_keys USING (uid);
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Display apt package manager sources.
|
||||
name: apt_sources
|
||||
query: SELECT * FROM apt_sources;
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Gather information about processes that are listening on a socket.
|
||||
name: listening_ports
|
||||
query: SELECT pid, port, processes.path, cmdline, cwd FROM listening_ports JOIN processes USING (pid) WHERE port!=0;
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: query
|
||||
spec:
|
||||
description: Display yum package manager sources.
|
||||
name: yum_sources
|
||||
query: SELECT name, baseurl, enabled, gpgcheck FROM yum_sources;
|
||||
@@ -17,6 +17,7 @@ spec:
|
||||
configuration:
|
||||
- /etc/passwd
|
||||
- /etc/shadow
|
||||
- /etc/ld.so.preload
|
||||
- /etc/ld.so.conf
|
||||
- /etc/ld.so.conf.d/%%
|
||||
- /etc/pam.d/%%
|
||||
@@ -10,11 +10,11 @@ exactly what we have done with our [unwanted-chrome-extensions](https://github.c
|
||||
However, we have included additional query packs
|
||||
that are more tailored to our specific environment that may be useful to some or at least serve as a reference to other organizations. osquery operates best when
|
||||
operators have carefully considered the datasets to be collected and the potential use-cases for that data.
|
||||
* [performance-metrics.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/performance-metrics.conf)
|
||||
* [security-tooling-checks.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/security-tooling-checks.conf)
|
||||
* [windows-application-security.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/windows-application-security.conf)
|
||||
* [windows-compliance.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/windows-compliance.conf)
|
||||
* [windows-registry-monitoring.conf](https://github.com/palantir/osquery-configuration/blob/master/Endpoints/packs/windows-registry-monitoring.conf)
|
||||
* [performance-metrics.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/performance-metrics.conf)
|
||||
* [security-tooling-checks.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/security-tooling-checks.conf)
|
||||
* [windows-application-security.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/windows-application-security.conf)
|
||||
* [windows-compliance.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/windows-compliance.conf)
|
||||
* [windows-registry-monitoring.conf](https://github.com/palantir/osquery-configuration/blob/master/Classic/Endpoints/packs/windows-registry-monitoring.conf)
|
||||
|
||||
|
||||
**Note**: We also utilize packs that are maintained in the official osquery project. In order to ensure you receive the most up to date version of the pack, please view them using the links below:
|
||||
@@ -41,15 +41,15 @@ environment.
|
||||
**Endpoints Configuration Overview**
|
||||
* The configurations in this folder are meant for MacOS and Windows and the interval timings assume that these hosts are only online for ~8 hours per day
|
||||
* The flags included in this configuration enable TLS client mode in osquery and assume it will be connected to a TLS server. We have also included non-TLS flagfiles for local testing.
|
||||
* File integrity monitoring on MacOS is enabled for specific files and directories defined in [osquery.conf](./Endpoints/MacOS/osquery.conf)
|
||||
* Events are disabled on Windows via the `--disable_events` flag in [osquery.flags](./Endpoints/Windows/osquery.flags). We use [Windows Event Forwarding](https://github.com/palantir/windows-event-forwarding) and don't have a need for osquery to process Windows event logs.
|
||||
* These configuration files utilize packs within the [packs](./Endpoints/packs) folder and may generate errors if started without them
|
||||
* File integrity monitoring on MacOS is enabled for specific files and directories defined in [osquery.conf](./Classic/Endpoints/MacOS/osquery.conf)
|
||||
* Events are disabled on Windows via the `--disable_events` flag in [osquery.flags](./Classic/Endpoints/Windows/osquery.flags). We use [Windows Event Forwarding](https://github.com/palantir/windows-event-forwarding) and don't have a need for osquery to process Windows event logs.
|
||||
* These configuration files utilize packs within the [packs](./Classic/Endpoints/packs) folder and may generate errors if started without them
|
||||
|
||||
**Servers Configuration Overview**
|
||||
* This configuration assumes the destination operating system is Linux-based and that the hosts are online at all times
|
||||
* Auditing mode is enabled for processes and network events. Ensure auditd is disabled or removed from the system where this will be running as it may conflict with osqueryd.
|
||||
* File integrity monitoring is enabled for specific files and directories defined in [osquery.conf](./Servers/Linux/osquery.conf)
|
||||
* Requires the [ossec-rootkit.conf](./Servers/Linux/packs/ossec-rootkit.conf) pack found to be located at `/etc/osquery/packs/ossec-rootkit.conf`
|
||||
* File integrity monitoring is enabled for specific files and directories defined in [osquery.conf](./Classic/Servers/Linux/osquery.conf)
|
||||
* Requires the [ossec-rootkit.conf](./Classic/Servers/Linux/packs/ossec-rootkit.conf) pack found to be located at `/etc/osquery/packs/ossec-rootkit.conf`
|
||||
* The subscriber for `user_events` is disabled
|
||||
|
||||
## Quickstart - Classic
|
||||
@@ -59,10 +59,10 @@ environment.
|
||||
4. Logs are located in `/var/log/osquery` (Linux/MacOS) and `c:\ProgramData\osquery\logs` (Windows)
|
||||
|
||||
## Quickstart - Fleet
|
||||
1. Install Fleet version 2.0.0 or higher
|
||||
2. [Enroll hosts to your Fleet server](https://github.com/kolide/fleet/blob/master/docs/infrastructure/adding-hosts-to-fleet.md) by configuring the appropriate [flags](https://github.com/kolide/fleet/blob/master/tools/osquery/example_osquery.flags)
|
||||
2. [Configure the fleetctl utility](https://github.com/kolide/fleet/blob/master/docs/cli/setup-guide.md#fleetctl-setup) to communicate with your Fleet server
|
||||
3. Assuming you'd like to use the endpoint configs, you can use the commands below to apply them:
|
||||
Install Fleet version 2.0.0 or higher
|
||||
2. [Enroll hosts to your Fleet server](https://github.com/kolide/fleet/blob/master/docs/infrastructure/adding-hosts-to-fleet.md) by configuring the appropriate [flags]
|
||||
3. [Configure the fleetctl utility](https://github.com/kolide/fleet/blob/master/docs/cli/setup-guide.md#fleetctl-setup) to communicate with your Fleet server
|
||||
4. Assuming you'd like to use the endpoint configs, you can use the commands below to apply them:
|
||||
|
||||
```
|
||||
git clone https://github.com/palantir/osquery-configuration.git
|
||||
@@ -79,7 +79,7 @@ The desired osquery directory structure for Linux, MacOS, and Windows is outline
|
||||
**Linux**
|
||||
```
|
||||
$ git clone https://github.com/palantir/osquery-configuration.git
|
||||
$ cp -R osquery-configuration/Servers/Linux/* /etc/osquery
|
||||
$ cp -R osquery-configuration/Fleet/Servers/Linux/* /etc/osquery
|
||||
$ sudo osqueryctl start
|
||||
|
||||
/etc/osquery
|
||||
@@ -93,8 +93,8 @@ $ sudo osqueryctl start
|
||||
**MacOS**
|
||||
```
|
||||
$ git clone https://github.com/palantir/osquery-configuration.git
|
||||
$ cp osquery-configuration/Endpoints/MacOS/* /var/osquery
|
||||
$ cp osquery-configuration/Endpoints/packs/* /var/osquery/packs
|
||||
$ cp osquery-configuration/Fleet/Endpoints/MacOS/* /var/osquery
|
||||
$ cp osquery-configuration/Fleet/Endpoints/packs/* /var/osquery/packs
|
||||
$ mv /var/osquery/osquery_no_tls.flags /var/osquery/osquery.flags ## Non-TLS server testing
|
||||
$ sudo osqueryctl start
|
||||
|
||||
@@ -113,8 +113,8 @@ $ sudo osqueryctl start
|
||||
**Windows**
|
||||
```
|
||||
PS> git clone https://github.com/palantir/osquery-configuration.git
|
||||
PS> copy-item osquery-configuration/Endpoints/Windows/* c:\ProgramData\osquery
|
||||
PS> copy-item osquery-configuration/Endpoints/packs/* c:\ProgramData\osquery\packs
|
||||
PS> copy-item osquery-configuration/Fleet/Endpoints/Windows/* c:\ProgramData\osquery
|
||||
PS> copy-item osquery-configuration/Fleet/Endpoints/packs/* c:\ProgramData\osquery\packs
|
||||
PS> copy-item c:\ProgramData\osquery\osquery_no_tls.flags c:\ProgramData\osquery\osquery.flags -force ## Non-TLS server testing
|
||||
PS> start-service osqueryd
|
||||
|
||||
33
salt/fleet/files/scripts/so-fleet-packages
Normal file
33
salt/fleet/files/scripts/so-fleet-packages
Normal file
@@ -0,0 +1,33 @@
|
||||
#!/bin/bash
|
||||
{% set MAIN_HOSTNAME = salt['grains.get']('host') %}
|
||||
{% set MAIN_IP = salt['pillar.get']('node:mainip') %}
|
||||
|
||||
|
||||
#so-fleet-packages $FleetHostname/IP
|
||||
|
||||
#if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
|
||||
# echo "so-fleet container not running... Exiting..."
|
||||
# exit 1
|
||||
#fi
|
||||
|
||||
#docker exec so-fleet /bin/ash -c "echo {{ MAIN_IP }} {{ MAIN_HOSTNAME }} >> /etc/hosts"
|
||||
#esecret=$(docker exec so-fleet fleetctl get enroll-secret)
|
||||
|
||||
#Concat fleet.crt & ca.crt - this is required for launcher connectivity
|
||||
#cat /etc/pki/fleet.crt /etc/pki/ca.crt > /etc/pki/launcher.crt
|
||||
#Actually only need to use /etc/ssl/certs/intca.crt
|
||||
|
||||
#Create the output directory
|
||||
#mkdir /opt/so/conf/fleet/packages
|
||||
|
||||
docker run \
|
||||
--rm \
|
||||
--mount type=bind,source=/opt/so/conf/fleet/packages,target=/output \
|
||||
--mount type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt \
|
||||
docker.io/soshybridhunter/so-fleet-launcher:HH1.1.0 "$esecret" "$1":8090
|
||||
|
||||
cp /opt/so/conf/fleet/packages/launcher.* /opt/so/saltstack/salt/launcher/packages/
|
||||
|
||||
#Update timestamp on packages webpage
|
||||
sed -i "s@.*Generated.*@Generated: $(date '+%m%d%Y')@g" /opt/so/conf/fleet/packages/index.html
|
||||
sed -i "s@.*Generated.*@Generated: $(date '+%m%d%Y')@g" /opt/so/saltstack/salt/fleet/files/dedicated-index.html
|
||||
46
salt/fleet/files/scripts/so-fleet-setup
Normal file
46
salt/fleet/files/scripts/so-fleet-setup
Normal file
@@ -0,0 +1,46 @@
|
||||
#!/bin/bash
|
||||
{% set MAIN_HOSTNAME = salt['grains.get']('host') %}
|
||||
{% set MAIN_IP = salt['pillar.get']('node:mainip') %}
|
||||
|
||||
#so-fleet-setup.sh $FleetEmail
|
||||
|
||||
# Enable Fleet
|
||||
echo "Starting Docker Containers..."
|
||||
salt-call state.apply mysql queue=True >> /root/fleet-setup.log
|
||||
salt-call state.apply fleet queue=True >> /root/fleet-setup.log
|
||||
salt-call state.apply redis queue=True >> /root/fleet-setup.log
|
||||
|
||||
if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
|
||||
echo "so-fleet container not running... Exiting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
initpw=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
|
||||
|
||||
docker exec so-fleet /bin/ash -c "echo {{ MAIN_IP }} {{ MAIN_HOSTNAME }} >> /etc/hosts"
|
||||
docker exec so-fleet fleetctl config set --address https://{{ MAIN_HOSTNAME }}:443 --tls-skip-verify --url-prefix /fleet
|
||||
docker exec so-fleet fleetctl setup --email $1 --password $initpw
|
||||
|
||||
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
|
||||
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
|
||||
docker exec so-fleet fleetctl apply -f /packs/hh/hhdefault.yml
|
||||
docker exec so-fleet /bin/sh -c 'for pack in /packs/palantir/Fleet/Endpoints/packs/*.yaml; do fleetctl apply -f "$pack"; done'
|
||||
docker exec so-fleet fleetctl apply -f /packs/hh/osquery.conf
|
||||
|
||||
|
||||
# Enable Fleet
|
||||
echo "Enabling Fleet..."
|
||||
salt-call state.apply fleet.event_enable-fleet queue=True >> /root/fleet-setup.log
|
||||
salt-call state.apply common queue=True >> /root/fleet-setup.log
|
||||
|
||||
# Generate osquery install packages
|
||||
echo "Generating osquery install packages - this will take some time..."
|
||||
salt-call state.apply fleet.event_gen-packages queue=True >> /root/fleet-setup.log
|
||||
sleep 120
|
||||
|
||||
echo "Installing launcher via salt..."
|
||||
salt-call state.apply fleet.install_package queue=True >> /root/fleet-setup.log
|
||||
salt-call state.apply filebeat queue=True >> /root/fleet-setup.log
|
||||
|
||||
echo "Fleet Setup Complete - Login here: https://{{ MAIN_HOSTNAME }}"
|
||||
echo "Your username is $2 and your password is $initpw"
|
||||
@@ -1,8 +1,25 @@
|
||||
{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', 'iwonttellyou') %}
|
||||
{%- set FLEETPASS = salt['pillar.get']('auth:fleet', 'bazinga') -%}
|
||||
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') -%}
|
||||
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
|
||||
{%- set FLEETPASS = salt['pillar.get']('secrets:fleet', None) -%}
|
||||
{%- set FLEETJWT = salt['pillar.get']('secrets:fleet_jwt', None) -%}
|
||||
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.1.4') %}
|
||||
{% set MASTER = salt['grains.get']('master') %}
|
||||
{% set MAINIP = salt['pillar.get']('node:mainip') %}
|
||||
{% set FLEETARCH = salt['grains.get']('role') %}
|
||||
|
||||
|
||||
{% if FLEETARCH == "so-fleet" %}
|
||||
{% set MAINIP = salt['pillar.get']('node:mainip') %}
|
||||
{% else %}
|
||||
{% set MAINIP = salt['pillar.get']('static:masterip') %}
|
||||
{% endif %}
|
||||
|
||||
#{% if grains.id.split('_')|last in ['master', 'eval', 'fleet'] %}
|
||||
#so/fleet:
|
||||
# event.send:
|
||||
# - data:
|
||||
# action: 'enablefleet'
|
||||
# hostname: {{ grains.host }}
|
||||
#{% endif %}
|
||||
|
||||
# Fleet Setup
|
||||
fleetcdir:
|
||||
@@ -18,11 +35,25 @@ fleetpackcdir:
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
fleetnsmdir:
|
||||
file.directory:
|
||||
- name: /nsm/osquery/fleet
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
fleetpacksync:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/fleet/packs
|
||||
- source: salt://fleet/packs
|
||||
- source: salt://fleet/files/packs
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
fleetpackagessync:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/fleet/packages
|
||||
- source: salt://fleet/packages/
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
@@ -33,24 +64,35 @@ fleetlogdir:
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
fleetsetupscript:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/fleet/so-fleet-setup.sh
|
||||
- source: salt://fleet/so-fleet-setup.sh
|
||||
fleetsetupscripts:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- user: 0
|
||||
- group: 0
|
||||
- file_mode: 755
|
||||
- template: jinja
|
||||
- source: salt://fleet/files/scripts
|
||||
|
||||
osquerypackageswebpage:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/fleet/packages/index.html
|
||||
- source: salt://fleet/osquery-packages.html
|
||||
- source: salt://fleet/files/dedicated-index.html
|
||||
- template: jinja
|
||||
|
||||
fleetdb:
|
||||
mysql_database.present:
|
||||
- name: fleet
|
||||
- connection_host: {{ MAINIP }}
|
||||
- connection_port: 3306
|
||||
- connection_user: root
|
||||
- connection_pass: {{ MYSQLPASS }}
|
||||
|
||||
fleetdbuser:
|
||||
mysql_user.present:
|
||||
- host: 172.17.0.0/255.255.0.0
|
||||
- password: {{ FLEETPASS }}
|
||||
- connection_host: {{ MAINIP }}
|
||||
- connection_port: 3306
|
||||
- connection_user: root
|
||||
- connection_pass: {{ MYSQLPASS }}
|
||||
|
||||
@@ -60,6 +102,21 @@ fleetdbpriv:
|
||||
- database: fleet.*
|
||||
- user: fleetdbuser
|
||||
- host: 172.17.0.0/255.255.0.0
|
||||
- connection_host: {{ MAINIP }}
|
||||
- connection_port: 3306
|
||||
- connection_user: root
|
||||
- connection_pass: {{ MYSQLPASS }}
|
||||
|
||||
|
||||
{% if FLEETPASS == None or FLEETJWT == None %}
|
||||
|
||||
fleet_password_none:
|
||||
test.configurable_test_state:
|
||||
- changes: False
|
||||
- result: False
|
||||
- comment: "Fleet MySQL Password or JWT Key Error - Not Starting Fleet"
|
||||
|
||||
{% else %}
|
||||
|
||||
so-fleet:
|
||||
docker_container.running:
|
||||
@@ -68,22 +125,25 @@ so-fleet:
|
||||
- port_bindings:
|
||||
- 0.0.0.0:8080:8080
|
||||
- environment:
|
||||
- KOLIDE_MYSQL_ADDRESS={{ MASTERIP }}:3306
|
||||
- KOLIDE_MYSQL_ADDRESS={{ MAINIP }}:3306
|
||||
- KOLIDE_REDIS_ADDRESS={{ MAINIP }}:6379
|
||||
- KOLIDE_MYSQL_DATABASE=fleet
|
||||
- KOLIDE_MYSQL_USERNAME=fleetdbuser
|
||||
- KOLIDE_MYSQL_PASSWORD={{ FLEETPASS }}
|
||||
- KOLIDE_REDIS_ADDRESS={{ MASTERIP }}:6379
|
||||
- KOLIDE_SERVER_CERT=/ssl/server.cert
|
||||
- KOLIDE_SERVER_KEY=/ssl/server.key
|
||||
- KOLIDE_LOGGING_JSON=true
|
||||
- KOLIDE_AUTH_JWT_KEY=thisisatest
|
||||
- KOLIDE_OSQUERY_STATUS_LOG_FILE=/var/log/osquery/status.log
|
||||
- KOLIDE_AUTH_JWT_KEY= {{ FLEETJWT }}
|
||||
- KOLIDE_OSQUERY_STATUS_LOG_FILE=/var/log/fleet/status.log
|
||||
- KOLIDE_OSQUERY_RESULT_LOG_FILE=/var/log/osquery/result.log
|
||||
- KOLIDE_SERVER_URL_PREFIX=/fleet
|
||||
- binds:
|
||||
- /etc/pki/fleet.key:/ssl/server.key:ro
|
||||
- /etc/pki/fleet.crt:/ssl/server.cert:ro
|
||||
- /opt/so/log/fleet:/var/log/osquery
|
||||
- /opt/so/log/fleet:/var/log/fleet
|
||||
- /nsm/osquery/fleet:/var/log/osquery
|
||||
- /opt/so/conf/fleet/packs:/packs
|
||||
- watch:
|
||||
- /opt/so/conf/fleet/etc
|
||||
|
||||
{% endif %}
|
||||
|
||||
21
salt/fleet/install_package.sls
Normal file
21
salt/fleet/install_package.sls
Normal file
@@ -0,0 +1,21 @@
|
||||
{%- set FLEETMASTER = salt['pillar.get']('static:fleet_master', False) -%}
|
||||
{%- set FLEETNODE = salt['pillar.get']('static:fleet_node', False) -%}
|
||||
{%- set FLEETHOSTNAME = salt['pillar.get']('static:fleet_hostname', False) -%}
|
||||
{%- set FLEETIP = salt['pillar.get']('static:fleet_ip', False) -%}
|
||||
|
||||
{%- if FLEETMASTER or FLEETNODE %}
|
||||
|
||||
{{ FLEETHOSTNAME }}:
|
||||
host.present:
|
||||
- ip: {{ FLEETIP }}
|
||||
- clean: True
|
||||
|
||||
launcherpkg:
|
||||
pkg.installed:
|
||||
- sources:
|
||||
{% if grains['os'] == 'CentOS' %}
|
||||
- launcher-final: salt://fleet/packages/launcher.rpm
|
||||
{% elif grains['os'] == 'Ubuntu' %}
|
||||
- launcher-final: salt://fleet/packages/launcher.deb
|
||||
{% endif %}
|
||||
{%- endif %}
|
||||
1
salt/fleet/packages/info.txt
Normal file
1
salt/fleet/packages/info.txt
Normal file
@@ -0,0 +1 @@
|
||||
Osquery Packages will be copied to this folder
|
||||
@@ -1,53 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
#so-fleet-setup.sh $MasterIP $FleetEmail
|
||||
|
||||
if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
|
||||
echo "so-fleet container not running... Exiting..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
initpw=$(date +%s | sha256sum | base64 | head -c 16 ; echo)
|
||||
|
||||
docker exec so-fleet fleetctl config set --address https://$1:443 --tls-skip-verify --url-prefix /fleet
|
||||
docker exec so-fleet fleetctl setup --email $2 --password $initpw
|
||||
|
||||
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/options.yaml
|
||||
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
|
||||
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
|
||||
docker exec so-fleet fleetctl apply -f /packs/hh/hhdefault.yml
|
||||
docker exec so-fleet /bin/sh -c 'for pack in /packs/palantir/Fleet/Endpoints/packs/*.yaml; do fleetctl apply -f "$pack"; done'
|
||||
|
||||
esecret=$(docker exec so-fleet fleetctl get enroll-secret)
|
||||
|
||||
#Concat fleet.crt & ca.crt - this is required for launcher connectivity
|
||||
cat /etc/pki/fleet.crt /etc/pki/ca.crt > /etc/pki/launcher.crt
|
||||
|
||||
#Create the output directory
|
||||
mkdir /opt/so/conf/fleet/packages
|
||||
|
||||
#At some point we should version launcher `latest` to avoid hard pinning here
|
||||
docker run \
|
||||
--rm \
|
||||
--mount type=bind,source=/opt/so/conf/fleet/packages,target=/output \
|
||||
--mount type=bind,source=/etc/pki/launcher.crt,target=/var/launcher/launcher.crt \
|
||||
docker.io/soshybridhunter/so-fleet-launcher:HH1.1.0 "$esecret" "$1":8080
|
||||
|
||||
cp /opt/so/conf/fleet/packages/launcher.* /opt/so/saltstack/salt/launcher/packages/
|
||||
#Update timestamp on packages webpage
|
||||
sed -i "s@.*Generated.*@Generated: $(date '+%m%d%Y')@g" /opt/so/conf/fleet/packages/index.html
|
||||
sed -i "s@.*Generated.*@Generated: $(date '+%m%d%Y')@g" /opt/so/saltstack/salt/fleet/osquery-packages.html
|
||||
|
||||
# Enable Fleet on all the other parts of the infrastructure
|
||||
sed -i 's/fleetsetup: 0/fleetsetup: 1/g' /opt/so/saltstack/pillar/static.sls
|
||||
|
||||
# Install osquery locally
|
||||
#if cat /etc/os-release | grep -q 'debian'; then
|
||||
# dpkg -i /opt/so/conf/fleet/packages/launcher.deb
|
||||
#else
|
||||
# rpm -i /opt/so/conf/fleet/packages/launcher.rpm
|
||||
#fi
|
||||
echo "Installing launcher via salt"
|
||||
salt-call state.apply launcher queue=True > /root/launcher.log
|
||||
echo "Fleet Setup Complete - Login here: https://$1"
|
||||
echo "Your username is $2 and your password is $initpw"
|
||||
25
salt/healthcheck/init.sls
Normal file
25
salt/healthcheck/init.sls
Normal file
@@ -0,0 +1,25 @@
|
||||
{% set CHECKS = salt['pillar.get']('healthcheck:checks', {}) %}
|
||||
{% set ENABLED = salt['pillar.get']('healthcheck:enabled', False) %}
|
||||
{% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 300) %}
|
||||
|
||||
{% if CHECKS and ENABLED %}
|
||||
{% set STATUS = ['present','enabled'] %}
|
||||
{% else %}
|
||||
{% set STATUS = ['absent','disabled'] %}
|
||||
nohealthchecks:
|
||||
test.configurable_test_state:
|
||||
- name: nohealthchecks
|
||||
- changes: True
|
||||
- result: True
|
||||
- comment: 'No checks are enabled for the healthcheck schedule'
|
||||
{% endif %}
|
||||
|
||||
healthcheck_schedule_{{ STATUS[0] }}:
|
||||
schedule.{{ STATUS[0] }}:
|
||||
- name: healthcheck
|
||||
- function: healthcheck.run
|
||||
- seconds: {{ SCHEDULE }}
|
||||
|
||||
healthcheck_schedule_{{ STATUS[1] }}:
|
||||
schedule.{{ STATUS[1] }}:
|
||||
- name: healthcheck
|
||||
34
salt/kibana/bin/so-kibana-config-load
Normal file
34
salt/kibana/bin/so-kibana-config-load
Normal file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
|
||||
KIBANA_VERSION="7.6.1"
|
||||
MAX_WAIT=60
|
||||
|
||||
# Check to see if Kibana is available
|
||||
wait_step=0
|
||||
until curl -s -XGET http://localhost:5601 > /dev/null ; do
|
||||
wait_step=$(( ${wait_step} + 1 ))
|
||||
echo "Waiting on Kibana...Attempt #$wait_step"
|
||||
if [ ${wait_step} -gt ${MAX_WAIT} ]; then
|
||||
echo "ERROR: Kibana not available for more than ${MAX_WAIT} seconds."
|
||||
exit 5
|
||||
fi
|
||||
sleep 1s;
|
||||
done
|
||||
|
||||
# Sleep additional JIC server is not ready
|
||||
sleep 30s
|
||||
|
||||
# Load config
|
||||
curl -X PUT "localhost:5601/api/saved_objects/config/$KIBANA_VERSION" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'
|
||||
{ "attributes":
|
||||
{
|
||||
"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29",
|
||||
"defaultRoute":"/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645",
|
||||
"discover:sampleSize":"100",
|
||||
"dashboard:defaultDarkTheme":true,
|
||||
"theme:darkMode":true,
|
||||
"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\",\n \"mode\": \"quick\"\n}"
|
||||
}
|
||||
}'
|
||||
|
||||
# Load saved objects
|
||||
curl -X POST "localhost:5601/api/saved_objects/_import" -H "kbn-xsrf: true" --form file=@/opt/so/saltstack/salt/kibana/saved_objects.ndjson
|
||||
@@ -1,9 +1,10 @@
|
||||
{ "attributes":
|
||||
{
|
||||
"defaultIndex": "*:so-*",
|
||||
"discover:sampleSize":"100",
|
||||
"dashboard:defaultDarkTheme":true,
|
||||
"theme:darkMode":true,
|
||||
"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\",\n \"mode\": \"quick\"\n}"
|
||||
}
|
||||
{
|
||||
"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29",
|
||||
"defaultRoute":"/app/kibana#/dashboard/a8411b30-6d03-11ea-b301-3d6c35840645",
|
||||
"discover:sampleSize":"100",
|
||||
"dashboard:defaultDarkTheme":true,
|
||||
"theme:darkMode":true,
|
||||
"timepicker:timeDefaults":"{\n \"from\": \"now-24h\",\n \"to\": \"now\",\n \"mode\": \"quick\"\n}"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,3 +11,4 @@ elasticsearch.hosts: [ "http://{{ ES }}:9200" ]
|
||||
#xpack.monitoring.ui.container.elasticsearch.enabled: true
|
||||
elasticsearch.requestTimeout: 90000
|
||||
logging.dest: /var/log/kibana/kibana.log
|
||||
telemetry.enabled: false
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.1.4') %}
|
||||
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.2.1') %}
|
||||
{% set MASTER = salt['grains.get']('master') %}
|
||||
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
|
||||
{% if FEATURES %}
|
||||
@@ -59,6 +59,8 @@ synckibanacustom:
|
||||
- user: 932
|
||||
- group: 939
|
||||
|
||||
# File.Recurse for custom saved dashboards
|
||||
|
||||
# Start the kibana docker
|
||||
so-kibana:
|
||||
docker_container.running:
|
||||
@@ -66,7 +68,6 @@ so-kibana:
|
||||
- hostname: kibana
|
||||
- user: kibana
|
||||
- environment:
|
||||
- KIBANA_DEFAULTAPPID=dashboard/94b52620-342a-11e7-9d52-4f090484f59e
|
||||
- ELASTICSEARCH_HOST={{ MASTER }}
|
||||
- ELASTICSEARCH_PORT=9200
|
||||
- MASTER={{ MASTER }}
|
||||
@@ -77,3 +78,17 @@ so-kibana:
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
- port_bindings:
|
||||
- 0.0.0.0:5601:5601
|
||||
|
||||
so-kibana-config-load:
|
||||
cmd.script:
|
||||
- shell: /bin/bash
|
||||
- runas: socore
|
||||
- source: salt://kibana/bin/so-kibana-config-load
|
||||
|
||||
# Keep the setting correct
|
||||
#KibanaHappy:
|
||||
# cmd.script:
|
||||
# - shell: /bin/bash
|
||||
# - runas: socore
|
||||
# - source: salt://kibana/bin/keepkibanahappy.sh
|
||||
# - template: jinja
|
||||
|
||||
583
salt/kibana/saved_objects.ndjson
Normal file
583
salt/kibana/saved_objects.ndjson
Normal file
File diff suppressed because one or more lines are too long
@@ -10,17 +10,16 @@
|
||||
|
||||
|
||||
filter {
|
||||
if "zeek" in [tags] and "test_data" not in [tags] and "import" not in [tags] {
|
||||
if [module] =~ "zeek" {
|
||||
mutate {
|
||||
##add_tag => [ "conf_file_9000"]
|
||||
}
|
||||
}
|
||||
}
|
||||
output {
|
||||
if "zeek" in [tags] and "test_data" not in [tags] and "import" not in [tags] {
|
||||
# stdout { codec => rubydebug }
|
||||
if [module] =~ "zeek" {
|
||||
elasticsearch {
|
||||
pipeline => "%{event_type}"
|
||||
pipeline => "%{module}.%{dataset}"
|
||||
hosts => "{{ ES }}"
|
||||
index => "so-zeek-%{+YYYY.MM.dd}"
|
||||
template_name => "so-zeek"
|
||||
@@ -9,11 +9,12 @@
|
||||
|
||||
|
||||
output {
|
||||
if "osquery" in [tags] {
|
||||
if [module] =~ "osquery" {
|
||||
elasticsearch {
|
||||
pipeline => "%{module}.%{dataset}"
|
||||
hosts => "{{ ES }}"
|
||||
index => "so-osquery-%{+YYYY.MM.dd}"
|
||||
template => "/so-common-template.json"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -9,16 +9,16 @@
|
||||
# Last Update: 12/9/2016
|
||||
|
||||
filter {
|
||||
if [event_type] == "suricata" and "test_data" not in [tags] {
|
||||
if [module] == "suricata" {
|
||||
mutate {
|
||||
##add_tag => [ "conf_file_9400"]
|
||||
}
|
||||
}
|
||||
}
|
||||
output {
|
||||
if [event_type] == "suricata" and "test_data" not in [tags] {
|
||||
#stdout { codec => rubydebug }
|
||||
if [module] =~ "suricata" {
|
||||
elasticsearch {
|
||||
pipeline => "%{module}.%{dataset}"
|
||||
hosts => "{{ ES }}"
|
||||
index => "so-ids-%{+YYYY.MM.dd}"
|
||||
template => "/so-common-template.json"
|
||||
|
||||
@@ -9,7 +9,7 @@
|
||||
# Last Update: 9/19/2018
|
||||
|
||||
filter {
|
||||
if [event_type] =~ "ossec" {
|
||||
if [module] =~ "ossec" {
|
||||
mutate {
|
||||
##add_tag => [ "conf_file_9600"]
|
||||
}
|
||||
@@ -17,9 +17,9 @@ filter {
|
||||
}
|
||||
|
||||
output {
|
||||
if [event_type] =~ "ossec" or "ossec" in [tags] {
|
||||
if [module] =~ "ossec" {
|
||||
elasticsearch {
|
||||
pipeline => "%{event_type}"
|
||||
pipeline => "%{module}.%{dataset}"
|
||||
hosts => "{{ ES }}"
|
||||
index => "so-ossec-%{+YYYY.MM.dd}"
|
||||
template_name => "so-common"
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
|
||||
filter {
|
||||
if [event_type] =~ "strelka" {
|
||||
if [module] =~ "strelka" {
|
||||
mutate {
|
||||
##add_tag => [ "conf_file_9000"]
|
||||
}
|
||||
@@ -19,6 +19,7 @@ filter {
|
||||
output {
|
||||
if [event_type] =~ "strelka" {
|
||||
elasticsearch {
|
||||
pipeline => "%{module}.%{dataset}"
|
||||
hosts => "{{ ES }}"
|
||||
index => "so-strelka-%{+YYYY.MM.dd}"
|
||||
template_name => "so-common"
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{
|
||||
"index_patterns": ["so-ids-*", "so-firewall-*", "so-syslog-*", "so-zeek-*", "so-import-*", "so-ossec-*", "so-strelka-*", "so-beats-*"],
|
||||
"index_patterns": ["so-ids-*", "so-firewall-*", "so-syslog-*", "so-zeek-*", "so-import-*", "so-ossec-*", "so-strelka-*", "so-beats-*", "so-osquery-*"],
|
||||
"version":50001,
|
||||
"order" : 10,
|
||||
"settings":{
|
||||
@@ -17,6 +17,10 @@
|
||||
"@version":{
|
||||
"type":"keyword"
|
||||
},
|
||||
"osquery":{
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
},
|
||||
"geoip":{
|
||||
"dynamic":true,
|
||||
"properties":{
|
||||
@@ -192,6 +196,14 @@
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
},
|
||||
"message":{
|
||||
"type":"text",
|
||||
"fields":{
|
||||
"keyword":{
|
||||
"type":"keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"modbus":{
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
@@ -244,6 +256,10 @@
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
},
|
||||
"request":{
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
},
|
||||
"rfb":{
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
@@ -252,6 +268,10 @@
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
},
|
||||
"scan":{
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
},
|
||||
"server":{
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
|
||||
10
salt/master/files/add_minion.sh
Executable file
10
salt/master/files/add_minion.sh
Executable file
@@ -0,0 +1,10 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
# This script adds pillar and schedule files securely
|
||||
|
||||
MINION=$1
|
||||
|
||||
echo "Adding $1"
|
||||
cp /tmp/$MINION/pillar/$MINION.sls /opt/so/saltstack/pillar/minions/
|
||||
cp /tmp/$MINION/schedules/* /opt/so/saltstack/salt/patch/os/schedules/
|
||||
rm -rf /tmp/$MINION
|
||||
@@ -1,12 +1,13 @@
|
||||
#!/bin/bash
|
||||
|
||||
MASTER={{ MASTER }}
|
||||
VERSION="HH1.1.4"
|
||||
VERSION="HH1.2.1"
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-core:$VERSION" \
|
||||
"so-cyberchef:$VERSION" \
|
||||
"so-acng:$VERSION" \
|
||||
"so-sensoroni:$VERSION" \
|
||||
"so-soc:$VERSION" \
|
||||
"so-kratos:$VERSION" \
|
||||
"so-fleet:$VERSION" \
|
||||
"so-soctopus:$VERSION" \
|
||||
"so-steno:$VERSION" \
|
||||
@@ -16,8 +17,6 @@ TRUSTED_CONTAINERS=( \
|
||||
"so-thehive-es:$VERSION" \
|
||||
"so-wazuh:$VERSION" \
|
||||
"so-kibana:$VERSION" \
|
||||
"so-auth-ui:$VERSION" \
|
||||
"so-auth-api:$VERSION" \
|
||||
"so-elastalert:$VERSION" \
|
||||
"so-navigator:$VERSION" \
|
||||
"so-filebeat:$VERSION" \
|
||||
|
||||
@@ -1,2 +1,2 @@
|
||||
{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', 'iwonttellyou') -%}
|
||||
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
|
||||
{{ MYSQLPASS }}
|
||||
|
||||
@@ -1,8 +1,16 @@
|
||||
{%- set MYSQLPASS = salt['pillar.get']('auth:mysql', 'iwonttellyou') %}
|
||||
{%- set FLEETPASS = salt['pillar.get']('auth:fleet', 'bazinga') %}
|
||||
{%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) %}
|
||||
{%- set MASTERIP = salt['pillar.get']('static:masterip', '') %}
|
||||
{% set VERSION = salt['pillar.get']('static:soversion', 'HH1.1.4') %}
|
||||
{% set MASTER = salt['grains.get']('master') %}
|
||||
{% set MAINIP = salt['pillar.get']('node:mainip') %}
|
||||
{% set FLEETARCH = salt['grains.get']('role') %}
|
||||
|
||||
{% if FLEETARCH == "so-fleet" %}
|
||||
{% set MAINIP = salt['pillar.get']('node:mainip') %}
|
||||
{% else %}
|
||||
{% set MAINIP = salt['pillar.get']('static:masterip') %}
|
||||
{% endif %}
|
||||
|
||||
# MySQL Setup
|
||||
mysqlpkgs:
|
||||
pkg.installed:
|
||||
@@ -50,6 +58,16 @@ mysqldatadir:
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
{% if MYSQLPASS == None %}
|
||||
|
||||
mysql_password_none:
|
||||
test.configurable_test_state:
|
||||
- changes: False
|
||||
- result: False
|
||||
- comment: "MySQL Password Error - Not Starting MySQL"
|
||||
|
||||
{% else %}
|
||||
|
||||
so-mysql:
|
||||
docker_container.running:
|
||||
- image: {{ MASTER }}:5000/soshybridhunter/so-mysql:{{ VERSION }}
|
||||
@@ -58,7 +76,7 @@ so-mysql:
|
||||
- port_bindings:
|
||||
- 0.0.0.0:3306:3306
|
||||
- environment:
|
||||
- MYSQL_ROOT_HOST={{ MASTERIP }}
|
||||
- MYSQL_ROOT_HOST={{ MAINIP }}
|
||||
- MYSQL_ROOT_PASSWORD=/etc/mypass
|
||||
- binds:
|
||||
- /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro
|
||||
@@ -67,3 +85,4 @@ so-mysql:
|
||||
- /opt/so/log/mysql:/var/log/mysql:rw
|
||||
- watch:
|
||||
- /opt/so/conf/mysql/etc
|
||||
{% endif %}
|
||||
@@ -1,7 +1,7 @@
|
||||
{%- set MASTER = grains['master'] -%}
|
||||
{%- set SENSORONIKEY = salt['pillar.get']('static:sensoronikey', '') -%}
|
||||
{
|
||||
"logFilename": "/opt/sensoroni/log/sensoroni.log",
|
||||
"logFilename": "/opt/sensoroni/logs/sensoroni.log",
|
||||
"logLevel":"debug",
|
||||
"agent": {
|
||||
"pollIntervalMs": 10000,
|
||||
|
||||
60
salt/reactor/fleet.sls
Normal file
60
salt/reactor/fleet.sls
Normal file
@@ -0,0 +1,60 @@
|
||||
#!py
|
||||
|
||||
from time import gmtime, strftime
|
||||
import fileinput
|
||||
import logging
|
||||
import re
|
||||
import subprocess
|
||||
|
||||
def run():
|
||||
MINIONID = data['id']
|
||||
ACTION = data['data']['action']
|
||||
HOSTNAME = data['data']['hostname']
|
||||
ROLE = data['data']['role']
|
||||
ESECRET = data['data']['enroll-secret']
|
||||
MAINIP = data['data']['mainip']
|
||||
|
||||
STATICFILE = '/opt/so/saltstack/pillar/static.sls'
|
||||
AUTHFILE = '/opt/so/saltstack/pillar/auth.sls'
|
||||
|
||||
if MINIONID.split('_')[-1] in ['master','eval','fleet']:
|
||||
if ACTION == 'enablefleet':
|
||||
logging.info('so/fleet enablefleet reactor')
|
||||
|
||||
# Enable Fleet
|
||||
for line in fileinput.input(STATICFILE, inplace=True):
|
||||
if ROLE == 'so-fleet':
|
||||
line = re.sub(r'fleet_node: \S*', f"fleet_node: True", line.rstrip())
|
||||
else:
|
||||
line = re.sub(r'fleet_master: \S*', f"fleet_master: True", line.rstrip())
|
||||
print(line)
|
||||
|
||||
# Update the enroll secret in the auth pillar
|
||||
for line in fileinput.input(AUTHFILE, inplace=True):
|
||||
line = re.sub(r'fleet_enroll-secret: \S*', f"fleet_enroll-secret: {ESECRET}", line.rstrip())
|
||||
print(line)
|
||||
|
||||
# Update the Fleet host in the static pillar
|
||||
for line in fileinput.input(STATICFILE, inplace=True):
|
||||
line = re.sub(r'fleet_hostname: \S*', f"fleet_hostname: {HOSTNAME}", line.rstrip())
|
||||
print(line)
|
||||
|
||||
# Update the Fleet IP in the static pillar
|
||||
for line in fileinput.input(STATICFILE, inplace=True):
|
||||
line = re.sub(r'fleet_ip: \S*', f"fleet_ip: {MAINIP}", line.rstrip())
|
||||
print(line)
|
||||
|
||||
if ACTION == 'genpackages':
|
||||
logging.info('so/fleet genpackages reactor')
|
||||
|
||||
# Run Docker container that will build the packages
|
||||
gen_packages = subprocess.run(["docker", "run","--rm", "--mount", "type=bind,source=/opt/so/saltstack/salt/fleet/packages,target=/output", \
|
||||
"--mount", "type=bind,source=/etc/ssl/certs/intca.crt,target=/var/launcher/launcher.crt", "docker.io/soshybridhunter/so-fleet-launcher:HH1.1.0", \
|
||||
f"{ESECRET}", f"{HOSTNAME}:8090"], stdout=subprocess.PIPE, encoding='ascii')
|
||||
|
||||
# Update the 'packages-built' timestamp on the webpage (stored in the static pillar)
|
||||
for line in fileinput.input(STATICFILE, inplace=True):
|
||||
line = re.sub(r'fleet_packages-timestamp: \S*', f"fleet_packages-timestamp: {strftime('%Y-%m-%d-%H:%M', gmtime())}", line.rstrip())
|
||||
print(line)
|
||||
|
||||
return {}
|
||||
18
salt/reactor/zeek.sls
Normal file
18
salt/reactor/zeek.sls
Normal file
@@ -0,0 +1,18 @@
|
||||
#!py
|
||||
|
||||
import logging
|
||||
import salt.client
|
||||
local = salt.client.LocalClient()
|
||||
|
||||
def run():
|
||||
minionid = data['id']
|
||||
zeek_restart = data['data']['zeek_restart']
|
||||
|
||||
logging.info('zeek_reactor: zeek_need_restarted:%s on:%s' % (zeek_restart, minionid))
|
||||
if zeek_restart:
|
||||
local.cmd(minionid, 'healthcheck.docker_stop', ['so-zeek'])
|
||||
local.cmd(minionid, 'state.apply', ['zeek'])
|
||||
|
||||
# __salt__['telegraf.send']('healthcheck zeek_restarted=%s' % str(zeek_restarted))
|
||||
|
||||
return {}
|
||||
@@ -27,15 +27,15 @@ dockerregistryconf:
|
||||
- source: salt://registry/etc/config.yml
|
||||
|
||||
# Copy the registry script
|
||||
dockerregistrybuild:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/docker-registry/so-buildregistry
|
||||
- source: salt://registry/bin/so-buildregistry
|
||||
- mode: 755
|
||||
#dockerregistrybuild:
|
||||
# file.managed:
|
||||
# - name: /opt/so/conf/docker-registry/so-buildregistry
|
||||
# - source: salt://registry/bin/so-buildregistry
|
||||
# - mode: 755
|
||||
|
||||
dockerexpandregistry:
|
||||
cmd.run:
|
||||
- name: /opt/so/conf/docker-registry/so-buildregistry
|
||||
#dockerexpandregistry:
|
||||
# cmd.run:
|
||||
# - name: /opt/so/conf/docker-registry/so-buildregistry
|
||||
|
||||
# Install the registry container
|
||||
so-dockerregistry:
|
||||
|
||||
25
salt/salt/beacons.sls
Normal file
25
salt/salt/beacons.sls
Normal file
@@ -0,0 +1,25 @@
|
||||
{% set CHECKS = salt['pillar.get']('healthcheck:checks', {}) %}
|
||||
{% set ENABLED = salt['pillar.get']('healthcheck:enabled', False) %}
|
||||
{% set SCHEDULE = salt['pillar.get']('healthcheck:schedule', 30) %}
|
||||
|
||||
include:
|
||||
- salt
|
||||
|
||||
{% if CHECKS and ENABLED %}
|
||||
salt_beacons:
|
||||
file.managed:
|
||||
- name: /etc/salt/minion.d/beacons.conf
|
||||
- source: salt://salt/files/beacons.conf.jinja
|
||||
- template: jinja
|
||||
- defaults:
|
||||
CHECKS: {{ CHECKS }}
|
||||
SCHEDULE: {{ SCHEDULE }}
|
||||
- watch_in:
|
||||
- service: salt_minion_service
|
||||
{% else %}
|
||||
salt_beacons:
|
||||
file.absent:
|
||||
- name: /etc/salt/minion.d/beacons.conf
|
||||
- watch_in:
|
||||
- service: salt_minion_service
|
||||
{% endif %}
|
||||
8
salt/salt/files/beacons.conf.jinja
Normal file
8
salt/salt/files/beacons.conf.jinja
Normal file
@@ -0,0 +1,8 @@
|
||||
{% if CHECKS -%}
|
||||
beacons:
|
||||
{%- for check in CHECKS %}
|
||||
{{ check }}:
|
||||
- disable_during_state_run: True
|
||||
- interval: {{ SCHEDULE }}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
4
salt/salt/init.sls
Normal file
4
salt/salt/init.sls
Normal file
@@ -0,0 +1,4 @@
|
||||
salt_minion_service:
|
||||
service.running:
|
||||
- name: salt-minion
|
||||
- enable: True
|
||||
78
salt/soc/files/kratos/kratos.yaml
Normal file
78
salt/soc/files/kratos/kratos.yaml
Normal file
@@ -0,0 +1,78 @@
|
||||
{%- set WEBACCESS = salt['pillar.get']('kratos:redirect', '') -%}
|
||||
{%- set KRATOSKEY = salt['pillar.get']('kratos:kratoskey', '') -%}
|
||||
|
||||
selfservice:
|
||||
strategies:
|
||||
password:
|
||||
enabled: true
|
||||
|
||||
verify:
|
||||
return_to: https://{{ WEBACCESS }}/
|
||||
|
||||
logout:
|
||||
redirect_to: https://{{ WEBACCESS }}/login/
|
||||
|
||||
login:
|
||||
request_lifespan: 10m
|
||||
after:
|
||||
password:
|
||||
-
|
||||
job: session
|
||||
-
|
||||
job: redirect
|
||||
config:
|
||||
default_redirect_url: https://{{ WEBACCESS }}/
|
||||
allow_user_defined_redirect: true
|
||||
|
||||
registration:
|
||||
request_lifespan: 10m
|
||||
after:
|
||||
password:
|
||||
-
|
||||
job: verify
|
||||
-
|
||||
job: session
|
||||
-
|
||||
job: redirect
|
||||
config:
|
||||
default_redirect_url: https://{{ WEBACCESS }}/
|
||||
allow_user_defined_redirect: true
|
||||
|
||||
log:
|
||||
level: debug
|
||||
format: json
|
||||
|
||||
secrets:
|
||||
session:
|
||||
- {{ KRATOSKEY }}
|
||||
|
||||
urls:
|
||||
login_ui: https://{{ WEBACCESS }}/login/
|
||||
registration_ui: https://{{ WEBACCESS }}/login/
|
||||
error_ui: https://{{ WEBACCESS }}/login/
|
||||
profile_ui: https://{{ WEBACCESS }}/
|
||||
verify_ui: https://{{ WEBACCESS }}/
|
||||
mfa_ui: https://{{ WEBACCESS }}/
|
||||
|
||||
self:
|
||||
public: https://{{ WEBACCESS }}/auth/
|
||||
admin: https://{{ WEBACCESS }}/kratos/
|
||||
default_return_to: https://{{ WEBACCESS }}/
|
||||
whitelisted_return_to_domains:
|
||||
- http://127.0.0.1
|
||||
|
||||
hashers:
|
||||
argon2:
|
||||
parallelism: 2
|
||||
memory: 16384
|
||||
iterations: 3
|
||||
salt_length: 16
|
||||
key_length: 32
|
||||
|
||||
identity:
|
||||
traits:
|
||||
default_schema_url: file:///kratos-conf/schema.json
|
||||
|
||||
courier:
|
||||
smtp:
|
||||
connection_uri: smtps://{{ WEBACCESS }}:25
|
||||
28
salt/soc/files/kratos/schema.json
Normal file
28
salt/soc/files/kratos/schema.json
Normal file
@@ -0,0 +1,28 @@
|
||||
{
|
||||
"$id": "securityonion.schema.json",
|
||||
"$schema": "http://json-schema.org/draft-07/schema#",
|
||||
"title": "Person",
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"email": {
|
||||
"type": "string",
|
||||
"format": "email",
|
||||
"title": "E-Mail",
|
||||
"minLength": 6,
|
||||
"ory.sh/kratos": {
|
||||
"credentials": {
|
||||
"password": {
|
||||
"identifier": true
|
||||
}
|
||||
},
|
||||
"verification": {
|
||||
"via": "email"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"required": [
|
||||
"email"
|
||||
],
|
||||
"additionalProperties": false
|
||||
}
|
||||
23
salt/soc/files/soc/changes.json
Normal file
23
salt/soc/files/soc/changes.json
Normal file
@@ -0,0 +1,23 @@
|
||||
{
|
||||
"title": "Introducing Hybrid Hunter 1.2.1 Beta",
|
||||
"changes": [
|
||||
{ "summary": "New authentication framework" },
|
||||
{ "summary": "New Logstash pipeline setup. Now uses multiple pipelines." },
|
||||
{ "summary": "New Master + Search node type and well as a Heavy Node type in the install." },
|
||||
{ "summary": "Change all nodes to point to the docker registry on the Master. This cuts down on the calls to dockerhub." },
|
||||
{ "summary": "Upgraded to Zeek 3.0" },
|
||||
{ "summary": "Upgraded to Elastic 7.6" },
|
||||
{ "summary": "New SO Start | Stop | Restart scripts for all components (eg. `so-playbook-restart`)." },
|
||||
{ "summary": "BPF support for Suricata (NIDS), Steno (PCAP) & Zeek (<a target='new' href='https://github.com/Security-Onion-Solutions/securityonion-saltstack/wiki/BPF'>More Info</a>)." },
|
||||
{ "summary": "Updated Domain Stats & Frequency Server containers to Python3 & created new Salt states for them." },
|
||||
{ "summary": "Added so-status script which gives an easy to read look at container status." },
|
||||
{ "summary": "Manage threshold.conf for Suricata using the thresholding pillar." },
|
||||
{ "summary": "The ISO now includes all the docker containers for faster install speeds." },
|
||||
{ "summary": "You now set the password for the onion account during the iso install. This account is temporary and will be removed after so-setup." },
|
||||
{ "summary": "Updated Helix parsers for better compatibility." },
|
||||
{ "summary": "Updated telegraf docker to include curl and jq." },
|
||||
{ "summary": "CVE-2020-0601 Zeek Detection Script." },
|
||||
{ "summary": "ISO Install now prompts you to create a password for the onion user during imaging. This account gets disabled during setup." },
|
||||
{ "summary": "Check out the Hybrid Hunter Quick Start Guide." }
|
||||
]
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user