mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2026-01-12 03:03:09 +01:00
Compare commits
1 Commits
kilo
...
2.4.60-202
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
b658c82cdc |
32
.github/workflows/close-threads.yml
vendored
32
.github/workflows/close-threads.yml
vendored
@@ -1,32 +0,0 @@
|
||||
name: 'Close Threads'
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '50 1 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
discussions: write
|
||||
|
||||
concurrency:
|
||||
group: lock-threads
|
||||
|
||||
jobs:
|
||||
close-threads:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: actions/stale@v5
|
||||
with:
|
||||
days-before-issue-stale: -1
|
||||
days-before-issue-close: 60
|
||||
stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
|
||||
close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
|
||||
days-before-pr-stale: 45
|
||||
days-before-pr-close: 60
|
||||
stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
|
||||
close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked."
|
||||
19
.github/workflows/lock-threads.yml
vendored
19
.github/workflows/lock-threads.yml
vendored
@@ -2,7 +2,7 @@ name: 'Lock Threads'
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '50 2 * * *'
|
||||
- cron: '50 1 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
@@ -14,6 +14,23 @@ concurrency:
|
||||
group: lock-threads
|
||||
|
||||
jobs:
|
||||
close-threads:
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: write
|
||||
steps:
|
||||
- uses: actions/stale@v5
|
||||
with:
|
||||
days-before-issue-stale: -1
|
||||
days-before-issue-close: 60
|
||||
stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
|
||||
close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
|
||||
days-before-pr-stale: 45
|
||||
days-before-pr-close: 60
|
||||
stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
|
||||
close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked."
|
||||
|
||||
lock-threads:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
|
||||
@@ -43,6 +43,8 @@ base:
|
||||
- soc.soc_soc
|
||||
- soc.adv_soc
|
||||
- soc.license
|
||||
- soctopus.soc_soctopus
|
||||
- soctopus.adv_soctopus
|
||||
- kibana.soc_kibana
|
||||
- kibana.adv_kibana
|
||||
- kratos.soc_kratos
|
||||
@@ -59,6 +61,8 @@ base:
|
||||
- elastalert.adv_elastalert
|
||||
- backup.soc_backup
|
||||
- backup.adv_backup
|
||||
- soctopus.soc_soctopus
|
||||
- soctopus.adv_soctopus
|
||||
- minions.{{ grains.id }}
|
||||
- minions.adv_{{ grains.id }}
|
||||
- stig.soc_stig
|
||||
@@ -104,6 +108,8 @@ base:
|
||||
- soc.soc_soc
|
||||
- soc.adv_soc
|
||||
- soc.license
|
||||
- soctopus.soc_soctopus
|
||||
- soctopus.adv_soctopus
|
||||
- kibana.soc_kibana
|
||||
- kibana.adv_kibana
|
||||
- strelka.soc_strelka
|
||||
@@ -159,6 +165,8 @@ base:
|
||||
- soc.soc_soc
|
||||
- soc.adv_soc
|
||||
- soc.license
|
||||
- soctopus.soc_soctopus
|
||||
- soctopus.adv_soctopus
|
||||
- kibana.soc_kibana
|
||||
- kibana.adv_kibana
|
||||
- strelka.soc_strelka
|
||||
@@ -254,6 +262,8 @@ base:
|
||||
- soc.soc_soc
|
||||
- soc.adv_soc
|
||||
- soc.license
|
||||
- soctopus.soc_soctopus
|
||||
- soctopus.adv_soctopus
|
||||
- kibana.soc_kibana
|
||||
- kibana.adv_kibana
|
||||
- backup.soc_backup
|
||||
|
||||
@@ -34,6 +34,7 @@
|
||||
'suricata',
|
||||
'utility',
|
||||
'schedule',
|
||||
'soctopus',
|
||||
'tcpreplay',
|
||||
'docker_clean'
|
||||
],
|
||||
@@ -100,6 +101,7 @@
|
||||
'suricata.manager',
|
||||
'utility',
|
||||
'schedule',
|
||||
'soctopus',
|
||||
'docker_clean',
|
||||
'stig'
|
||||
],
|
||||
@@ -121,6 +123,7 @@
|
||||
'suricata.manager',
|
||||
'utility',
|
||||
'schedule',
|
||||
'soctopus',
|
||||
'docker_clean',
|
||||
'stig'
|
||||
],
|
||||
@@ -154,6 +157,7 @@
|
||||
'healthcheck',
|
||||
'utility',
|
||||
'schedule',
|
||||
'soctopus',
|
||||
'tcpreplay',
|
||||
'docker_clean',
|
||||
'stig'
|
||||
@@ -196,6 +200,10 @@
|
||||
],
|
||||
}, grain='role') %}
|
||||
|
||||
{% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
|
||||
{% do allowed_states.append('mysql') %}
|
||||
{% endif %}
|
||||
|
||||
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
|
||||
{% do allowed_states.append('zeek') %}
|
||||
{%- endif %}
|
||||
@@ -221,6 +229,10 @@
|
||||
{% do allowed_states.append('elastalert') %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
|
||||
{% do allowed_states.append('playbook') %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
|
||||
{% do allowed_states.append('logstash') %}
|
||||
{% endif %}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
bpf:
|
||||
pcap:
|
||||
description: List of BPF filters to apply to Stenographer.
|
||||
description: List of BPF filters to apply to PCAP.
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: bpf.html
|
||||
|
||||
@@ -53,10 +53,13 @@ container_list() {
|
||||
"so-kibana"
|
||||
"so-kratos"
|
||||
"so-logstash"
|
||||
"so-mysql"
|
||||
"so-nginx"
|
||||
"so-pcaptools"
|
||||
"so-playbook"
|
||||
"so-redis"
|
||||
"so-soc"
|
||||
"so-soctopus"
|
||||
"so-steno"
|
||||
"so-strelka-backend"
|
||||
"so-strelka-filestream"
|
||||
|
||||
@@ -49,6 +49,10 @@ if [ "$CONTINUE" == "y" ]; then
|
||||
sed -i "s|$OLD_IP|$NEW_IP|g" $file
|
||||
done
|
||||
|
||||
echo "Granting MySQL root user permissions on $NEW_IP"
|
||||
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
|
||||
echo "Removing MySQL root user from $OLD_IP"
|
||||
docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null
|
||||
echo "Updating Kibana dashboards"
|
||||
salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True
|
||||
|
||||
|
||||
@@ -122,7 +122,6 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
|
||||
fi
|
||||
|
||||
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
|
||||
@@ -155,11 +154,15 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
|
||||
@@ -207,6 +210,7 @@ RESULT=0
|
||||
CONTAINER_IDS=$(docker ps -q)
|
||||
exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary
|
||||
exclude_container so-idstools # ignore due to known issues and noisy logging
|
||||
exclude_container so-playbook # ignore due to several playbook known issues
|
||||
|
||||
for container_id in $CONTAINER_IDS; do
|
||||
container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names")
|
||||
|
||||
@@ -67,6 +67,13 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-mysql':
|
||||
final_octet: 30
|
||||
port_bindings:
|
||||
- 0.0.0.0:3306:3306
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-nginx':
|
||||
final_octet: 31
|
||||
port_bindings:
|
||||
@@ -84,6 +91,13 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-playbook':
|
||||
final_octet: 32
|
||||
port_bindings:
|
||||
- 0.0.0.0:3000:3000
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-redis':
|
||||
final_octet: 33
|
||||
port_bindings:
|
||||
@@ -104,6 +118,13 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-soctopus':
|
||||
final_octet: 35
|
||||
port_bindings:
|
||||
- 0.0.0.0:7000:7000
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-strelka-backend':
|
||||
final_octet: 36
|
||||
custom_bind_mounts: []
|
||||
|
||||
@@ -46,11 +46,14 @@ docker:
|
||||
so-kibana: *dockerOptions
|
||||
so-kratos: *dockerOptions
|
||||
so-logstash: *dockerOptions
|
||||
so-mysql: *dockerOptions
|
||||
so-nginx: *dockerOptions
|
||||
so-nginx-fleet-node: *dockerOptions
|
||||
so-playbook: *dockerOptions
|
||||
so-redis: *dockerOptions
|
||||
so-sensoroni: *dockerOptions
|
||||
so-soc: *dockerOptions
|
||||
so-soctopus: *dockerOptions
|
||||
so-strelka-backend: *dockerOptions
|
||||
so-strelka-filestream: *dockerOptions
|
||||
so-strelka-frontend: *dockerOptions
|
||||
|
||||
@@ -227,113 +227,6 @@ elasticsearch:
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
so-logs-soc:
|
||||
close: 30
|
||||
delete: 365
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
- agent-mappings
|
||||
- dtc-agent-mappings
|
||||
- base-mappings
|
||||
- dtc-base-mappings
|
||||
- client-mappings
|
||||
- dtc-client-mappings
|
||||
- container-mappings
|
||||
- destination-mappings
|
||||
- dtc-destination-mappings
|
||||
- pb-override-destination-mappings
|
||||
- dll-mappings
|
||||
- dns-mappings
|
||||
- dtc-dns-mappings
|
||||
- ecs-mappings
|
||||
- dtc-ecs-mappings
|
||||
- error-mappings
|
||||
- event-mappings
|
||||
- dtc-event-mappings
|
||||
- file-mappings
|
||||
- dtc-file-mappings
|
||||
- group-mappings
|
||||
- host-mappings
|
||||
- dtc-host-mappings
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
- dtc-observer-mappings
|
||||
- organization-mappings
|
||||
- package-mappings
|
||||
- process-mappings
|
||||
- dtc-process-mappings
|
||||
- related-mappings
|
||||
- rule-mappings
|
||||
- dtc-rule-mappings
|
||||
- server-mappings
|
||||
- service-mappings
|
||||
- dtc-service-mappings
|
||||
- source-mappings
|
||||
- dtc-source-mappings
|
||||
- pb-override-source-mappings
|
||||
- threat-mappings
|
||||
- tls-mappings
|
||||
- url-mappings
|
||||
- user_agent-mappings
|
||||
- dtc-user_agent-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
data_stream: {}
|
||||
index_patterns:
|
||||
- logs-soc-so*
|
||||
priority: 500
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
dynamic_templates:
|
||||
- strings_as_keyword:
|
||||
mapping:
|
||||
ignore_above: 1024
|
||||
type: keyword
|
||||
match_mapping_type: string
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-soc-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 5000
|
||||
number_of_replicas: 0
|
||||
number_of_shards: 1
|
||||
refresh_interval: 30s
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
phases:
|
||||
cold:
|
||||
actions:
|
||||
set_priority:
|
||||
priority: 0
|
||||
min_age: 30d
|
||||
delete:
|
||||
actions:
|
||||
delete: {}
|
||||
min_age: 365d
|
||||
hot:
|
||||
actions:
|
||||
rollover:
|
||||
max_age: 30d
|
||||
max_primary_shard_size: 50gb
|
||||
set_priority:
|
||||
priority: 100
|
||||
min_age: 0ms
|
||||
warm:
|
||||
actions:
|
||||
set_priority:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
warm: 7
|
||||
so-common:
|
||||
close: 30
|
||||
delete: 365
|
||||
|
||||
@@ -57,11 +57,10 @@
|
||||
{ "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" } },
|
||||
{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }},
|
||||
{ "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } },
|
||||
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
|
||||
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
|
||||
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
|
||||
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
|
||||
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }},
|
||||
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
|
||||
{%- endraw %}
|
||||
{%- if HIGHLANDER %}
|
||||
|
||||
@@ -68,7 +68,7 @@
|
||||
"field": "_security",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
},
|
||||
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
|
||||
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
|
||||
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
|
||||
|
||||
@@ -1,389 +0,0 @@
|
||||
{
|
||||
"description": "Pipeline for pfSense",
|
||||
"processors": [
|
||||
{
|
||||
"set": {
|
||||
"field": "ecs.version",
|
||||
"value": "8.10.0"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "observer.vendor",
|
||||
"value": "netgate"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "observer.type",
|
||||
"value": "firewall"
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message",
|
||||
"target_field": "event.original"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "event.kind",
|
||||
"value": "event"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "event.timezone",
|
||||
"value": "{{_tmp.tz_offset}}",
|
||||
"if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"grok": {
|
||||
"description": "Parse syslog header",
|
||||
"field": "event.original",
|
||||
"patterns": [
|
||||
"^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}"
|
||||
],
|
||||
"pattern_definitions": {
|
||||
"ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?",
|
||||
"TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})",
|
||||
"BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:",
|
||||
"BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))",
|
||||
"NAME": "[[[:alnum:]]_-]+",
|
||||
"SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})",
|
||||
"TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?",
|
||||
"OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})",
|
||||
"PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH}*/)?%{BASEPATH:process.name})",
|
||||
"BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+",
|
||||
"META": "\\[[^\\]]*\\]"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"date": {
|
||||
"if": "ctx._tmp.timestamp8601 != null",
|
||||
"field": "_tmp.timestamp8601",
|
||||
"target_field": "@timestamp",
|
||||
"formats": [
|
||||
"ISO8601"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"date": {
|
||||
"if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
|
||||
"field": "_tmp.timestamp",
|
||||
"target_field": "@timestamp",
|
||||
"formats": [
|
||||
"MMM d HH:mm:ss",
|
||||
"MMM d HH:mm:ss",
|
||||
"MMM dd HH:mm:ss"
|
||||
],
|
||||
"timezone": "{{ event.timezone }}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"grok": {
|
||||
"description": "Set Event Provider",
|
||||
"field": "process.name",
|
||||
"patterns": [
|
||||
"^%{HYPHENATED_WORDS:event.provider}"
|
||||
],
|
||||
"pattern_definitions": {
|
||||
"HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-firewall",
|
||||
"if": "ctx.event.provider == 'filterlog'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-openvpn",
|
||||
"if": "ctx.event.provider == 'openvpn'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-ipsec",
|
||||
"if": "ctx.event.provider == 'charon'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-dhcp",
|
||||
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-unbound",
|
||||
"if": "ctx.event.provider == 'unbound'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-haproxy",
|
||||
"if": "ctx.event.provider == 'haproxy'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-php-fpm",
|
||||
"if": "ctx.event.provider == 'php-fpm'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-squid",
|
||||
"if": "ctx.event.provider == 'squid'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.16.0-suricata",
|
||||
"if": "ctx.event.provider == 'suricata'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"drop": {
|
||||
"if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "event.category",
|
||||
"value": "network",
|
||||
"if": "ctx.network != null"
|
||||
}
|
||||
},
|
||||
{
|
||||
"convert": {
|
||||
"field": "source.address",
|
||||
"target_field": "source.ip",
|
||||
"type": "ip",
|
||||
"ignore_failure": true,
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"convert": {
|
||||
"field": "destination.address",
|
||||
"target_field": "destination.ip",
|
||||
"type": "ip",
|
||||
"ignore_failure": true,
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "network.type",
|
||||
"value": "ipv6",
|
||||
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "network.type",
|
||||
"value": "ipv4",
|
||||
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
|
||||
}
|
||||
},
|
||||
{
|
||||
"geoip": {
|
||||
"field": "source.ip",
|
||||
"target_field": "source.geo",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"geoip": {
|
||||
"field": "destination.ip",
|
||||
"target_field": "destination.geo",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"geoip": {
|
||||
"ignore_missing": true,
|
||||
"database_file": "GeoLite2-ASN.mmdb",
|
||||
"field": "source.ip",
|
||||
"target_field": "source.as",
|
||||
"properties": [
|
||||
"asn",
|
||||
"organization_name"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"geoip": {
|
||||
"database_file": "GeoLite2-ASN.mmdb",
|
||||
"field": "destination.ip",
|
||||
"target_field": "destination.as",
|
||||
"properties": [
|
||||
"asn",
|
||||
"organization_name"
|
||||
],
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "source.as.asn",
|
||||
"target_field": "source.as.number",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "source.as.organization_name",
|
||||
"target_field": "source.as.organization.name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "destination.as.asn",
|
||||
"target_field": "destination.as.number",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "destination.as.organization_name",
|
||||
"target_field": "destination.as.organization.name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"community_id": {
|
||||
"target_field": "network.community_id",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"grok": {
|
||||
"field": "observer.ingress.interface.name",
|
||||
"patterns": [
|
||||
"%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
|
||||
],
|
||||
"ignore_missing": true,
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "network.vlan.id",
|
||||
"copy_from": "observer.ingress.vlan.id",
|
||||
"ignore_empty_value": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "related.ip",
|
||||
"value": "{{destination.ip}}",
|
||||
"allow_duplicates": false,
|
||||
"if": "ctx.destination?.ip != null"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "related.ip",
|
||||
"value": "{{source.ip}}",
|
||||
"allow_duplicates": false,
|
||||
"if": "ctx.source?.ip != null"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "related.ip",
|
||||
"value": "{{source.nat.ip}}",
|
||||
"allow_duplicates": false,
|
||||
"if": "ctx.source?.nat?.ip != null"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "related.hosts",
|
||||
"value": "{{destination.domain}}",
|
||||
"if": "ctx.destination?.domain != null"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "related.user",
|
||||
"value": "{{user.name}}",
|
||||
"if": "ctx.user?.name != null"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "network.direction",
|
||||
"value": "{{network.direction}}bound",
|
||||
"if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": [
|
||||
"_tmp"
|
||||
],
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"description": "This script processor iterates over the whole document to remove fields with null values.",
|
||||
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": "event.original",
|
||||
"if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
|
||||
"ignore_failure": true,
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log@custom",
|
||||
"ignore_missing_pipeline": true
|
||||
}
|
||||
}
|
||||
],
|
||||
"on_failure": [
|
||||
{
|
||||
"remove": {
|
||||
"field": [
|
||||
"_tmp"
|
||||
],
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "event.kind",
|
||||
"value": "pipeline_error"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "error.message",
|
||||
"value": "{{{ _ingest.on_failure_message }}}"
|
||||
}
|
||||
}
|
||||
],
|
||||
"_meta": {
|
||||
"managed_by": "fleet",
|
||||
"managed": true,
|
||||
"package": {
|
||||
"name": "pfsense"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
|
||||
{
|
||||
"description": "Pipeline for parsing pfSense Suricata logs.",
|
||||
"processors": [
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "suricata.common"
|
||||
}
|
||||
}
|
||||
],
|
||||
"on_failure": [
|
||||
{
|
||||
"set": {
|
||||
"field": "event.kind",
|
||||
"value": "pipeline_error"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "error.message",
|
||||
"value": "{{{ _ingest.on_failure_message }}}"
|
||||
}
|
||||
}
|
||||
],
|
||||
"_meta": {
|
||||
"managed_by": "fleet",
|
||||
"managed": true,
|
||||
"package": {
|
||||
"name": "pfsense"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -13,6 +13,7 @@
|
||||
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
|
||||
{ "lowercase": { "field": "network.transport", "ignore_failure": true } },
|
||||
{ "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
|
||||
{ "set": { "field": "observer.name", "value": "{{agent.name}}" } },
|
||||
{ "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
|
||||
|
||||
@@ -9,9 +9,11 @@
|
||||
'so-influxdb',
|
||||
'so-kibana',
|
||||
'so-kratos',
|
||||
'so-mysql',
|
||||
'so-nginx',
|
||||
'so-redis',
|
||||
'so-soc',
|
||||
'so-soctopus',
|
||||
'so-strelka-coordinator',
|
||||
'so-strelka-gatekeeper',
|
||||
'so-strelka-frontend',
|
||||
@@ -30,9 +32,11 @@
|
||||
'so-kibana',
|
||||
'so-kratos',
|
||||
'so-logstash',
|
||||
'so-mysql',
|
||||
'so-nginx',
|
||||
'so-redis',
|
||||
'so-soc',
|
||||
'so-soctopus',
|
||||
'so-strelka-coordinator',
|
||||
'so-strelka-gatekeeper',
|
||||
'so-strelka-frontend',
|
||||
|
||||
@@ -98,11 +98,19 @@ firewall:
|
||||
tcp:
|
||||
- 7788
|
||||
udp: []
|
||||
mysql:
|
||||
tcp:
|
||||
- 3306
|
||||
udp: []
|
||||
nginx:
|
||||
tcp:
|
||||
- 80
|
||||
- 443
|
||||
udp: []
|
||||
playbook:
|
||||
tcp:
|
||||
- 3000
|
||||
udp: []
|
||||
redis:
|
||||
tcp:
|
||||
- 6379
|
||||
@@ -170,6 +178,8 @@ firewall:
|
||||
hostgroups:
|
||||
eval:
|
||||
portgroups:
|
||||
- playbook
|
||||
- mysql
|
||||
- kibana
|
||||
- redis
|
||||
- influxdb
|
||||
@@ -353,6 +363,8 @@ firewall:
|
||||
hostgroups:
|
||||
manager:
|
||||
portgroups:
|
||||
- playbook
|
||||
- mysql
|
||||
- kibana
|
||||
- redis
|
||||
- influxdb
|
||||
@@ -547,6 +559,8 @@ firewall:
|
||||
hostgroups:
|
||||
managersearch:
|
||||
portgroups:
|
||||
- playbook
|
||||
- mysql
|
||||
- kibana
|
||||
- redis
|
||||
- influxdb
|
||||
@@ -742,6 +756,8 @@ firewall:
|
||||
- all
|
||||
standalone:
|
||||
portgroups:
|
||||
- playbook
|
||||
- mysql
|
||||
- kibana
|
||||
- redis
|
||||
- influxdb
|
||||
|
||||
@@ -121,9 +121,15 @@ firewall:
|
||||
localrules:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
mysql:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
nginx:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
playbook:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
redis:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
|
||||
@@ -117,6 +117,51 @@ rules_dir:
|
||||
- group: socore
|
||||
- makedirs: True
|
||||
|
||||
{% if STRELKAMERGED.rules.enabled %}
|
||||
strelkarepos:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/strelka/repos.txt
|
||||
- source: salt://strelka/rules/repos.txt.jinja
|
||||
- template: jinja
|
||||
- defaults:
|
||||
STRELKAREPOS: {{ STRELKAMERGED.rules.repos }}
|
||||
- makedirs: True
|
||||
strelka-yara-update:
|
||||
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
|
||||
cron.present:
|
||||
{% else %}
|
||||
cron.absent:
|
||||
{% endif %}
|
||||
- user: socore
|
||||
- name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1'
|
||||
- identifier: strelka-yara-update
|
||||
- hour: '7'
|
||||
- minute: '1'
|
||||
strelka-yara-download:
|
||||
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
|
||||
cron.present:
|
||||
{% else %}
|
||||
cron.absent:
|
||||
{% endif %}
|
||||
- user: socore
|
||||
- name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1'
|
||||
- identifier: strelka-yara-download
|
||||
- hour: '7'
|
||||
- minute: '1'
|
||||
{% if not GLOBALS.airgap %}
|
||||
update_yara_rules:
|
||||
cmd.run:
|
||||
- name: /usr/sbin/so-yara-update
|
||||
- onchanges:
|
||||
- file: yara_update_scripts
|
||||
download_yara_rules:
|
||||
cmd.run:
|
||||
- name: /usr/sbin/so-yara-download
|
||||
- onchanges:
|
||||
- file: yara_update_scripts
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
|
||||
@@ -20,6 +20,10 @@ manager:
|
||||
description: String of hosts to ignore the proxy settings for.
|
||||
global: True
|
||||
helpLink: proxy.html
|
||||
playbook:
|
||||
description: Enable playbook 1=enabled 0=disabled.
|
||||
global: True
|
||||
helpLink: playbook.html
|
||||
proxy:
|
||||
description: Proxy server to use for updates.
|
||||
global: True
|
||||
|
||||
@@ -286,6 +286,12 @@ function add_sensor_to_minion() {
|
||||
echo " " >> $PILLARFILE
|
||||
}
|
||||
|
||||
function add_playbook_to_minion() {
|
||||
printf '%s\n'\
|
||||
"playbook:"\
|
||||
" enabled: True"\
|
||||
" " >> $PILLARFILE
|
||||
}
|
||||
|
||||
function add_elastalert_to_minion() {
|
||||
printf '%s\n'\
|
||||
@@ -347,6 +353,13 @@ function add_nginx_to_minion() {
|
||||
" " >> $PILLARFILE
|
||||
}
|
||||
|
||||
function add_soctopus_to_minion() {
|
||||
printf '%s\n'\
|
||||
"soctopus:"\
|
||||
" enabled: True"\
|
||||
" " >> $PILLARFILE
|
||||
}
|
||||
|
||||
function add_soc_to_minion() {
|
||||
printf '%s\n'\
|
||||
"soc:"\
|
||||
@@ -361,6 +374,13 @@ function add_registry_to_minion() {
|
||||
" " >> $PILLARFILE
|
||||
}
|
||||
|
||||
function add_mysql_to_minion() {
|
||||
printf '%s\n'\
|
||||
"mysql:"\
|
||||
" enabled: True"\
|
||||
" " >> $PILLARFILE
|
||||
}
|
||||
|
||||
function add_kratos_to_minion() {
|
||||
printf '%s\n'\
|
||||
"kratos:"\
|
||||
@@ -436,13 +456,16 @@ function createEVAL() {
|
||||
add_elasticsearch_to_minion
|
||||
add_sensor_to_minion
|
||||
add_strelka_to_minion
|
||||
add_playbook_to_minion
|
||||
add_elastalert_to_minion
|
||||
add_kibana_to_minion
|
||||
add_telegraf_to_minion
|
||||
add_influxdb_to_minion
|
||||
add_nginx_to_minion
|
||||
add_soctopus_to_minion
|
||||
add_soc_to_minion
|
||||
add_registry_to_minion
|
||||
add_mysql_to_minion
|
||||
add_kratos_to_minion
|
||||
add_idstools_to_minion
|
||||
add_elastic_fleet_package_registry_to_minion
|
||||
@@ -455,14 +478,17 @@ function createSTANDALONE() {
|
||||
add_logstash_to_minion
|
||||
add_sensor_to_minion
|
||||
add_strelka_to_minion
|
||||
add_playbook_to_minion
|
||||
add_elastalert_to_minion
|
||||
add_kibana_to_minion
|
||||
add_redis_to_minion
|
||||
add_telegraf_to_minion
|
||||
add_influxdb_to_minion
|
||||
add_nginx_to_minion
|
||||
add_soctopus_to_minion
|
||||
add_soc_to_minion
|
||||
add_registry_to_minion
|
||||
add_mysql_to_minion
|
||||
add_kratos_to_minion
|
||||
add_idstools_to_minion
|
||||
add_elastic_fleet_package_registry_to_minion
|
||||
@@ -471,14 +497,17 @@ function createSTANDALONE() {
|
||||
function createMANAGER() {
|
||||
add_elasticsearch_to_minion
|
||||
add_logstash_to_minion
|
||||
add_playbook_to_minion
|
||||
add_elastalert_to_minion
|
||||
add_kibana_to_minion
|
||||
add_redis_to_minion
|
||||
add_telegraf_to_minion
|
||||
add_influxdb_to_minion
|
||||
add_nginx_to_minion
|
||||
add_soctopus_to_minion
|
||||
add_soc_to_minion
|
||||
add_registry_to_minion
|
||||
add_mysql_to_minion
|
||||
add_kratos_to_minion
|
||||
add_idstools_to_minion
|
||||
add_elastic_fleet_package_registry_to_minion
|
||||
@@ -487,14 +516,17 @@ function createMANAGER() {
|
||||
function createMANAGERSEARCH() {
|
||||
add_elasticsearch_to_minion
|
||||
add_logstash_to_minion
|
||||
add_playbook_to_minion
|
||||
add_elastalert_to_minion
|
||||
add_kibana_to_minion
|
||||
add_redis_to_minion
|
||||
add_telegraf_to_minion
|
||||
add_influxdb_to_minion
|
||||
add_nginx_to_minion
|
||||
add_soctopus_to_minion
|
||||
add_soc_to_minion
|
||||
add_registry_to_minion
|
||||
add_mysql_to_minion
|
||||
add_kratos_to_minion
|
||||
add_idstools_to_minion
|
||||
add_elastic_fleet_package_registry_to_minion
|
||||
|
||||
89
salt/mysql/config.sls
Normal file
89
salt/mysql/config.sls
Normal file
@@ -0,0 +1,89 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
|
||||
|
||||
# MySQL Setup
|
||||
mysqlpkgs:
|
||||
pkg.removed:
|
||||
- skip_suggestions: False
|
||||
- pkgs:
|
||||
{% if grains['os_family'] != 'RedHat' %}
|
||||
- python3-mysqldb
|
||||
{% else %}
|
||||
- python3-mysqlclient
|
||||
{% endif %}
|
||||
|
||||
mysqletcdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/mysql/etc
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
mysqlpiddir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/mysql/pid
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
mysqlcnf:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/mysql/etc/my.cnf
|
||||
- source: salt://mysql/etc/my.cnf
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
mysqlpass:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/mysql/etc/mypass
|
||||
- source: salt://mysql/etc/mypass
|
||||
- user: 939
|
||||
- group: 939
|
||||
- template: jinja
|
||||
- defaults:
|
||||
MYSQLPASS: {{ MYSQLPASS }}
|
||||
|
||||
mysqllogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/mysql
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
mysqldatadir:
|
||||
file.directory:
|
||||
- name: /nsm/mysql
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
mysql_sbin:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://mysql/tools/sbin
|
||||
- user: 939
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
|
||||
#mysql_sbin_jinja:
|
||||
# file.recurse:
|
||||
# - name: /usr/sbin
|
||||
# - source: salt://mysql/tools/sbin_jinja
|
||||
# - user: 939
|
||||
# - group: 939
|
||||
# - file_mode: 755
|
||||
# - template: jinja
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
2
salt/mysql/defaults.yaml
Normal file
2
salt/mysql/defaults.yaml
Normal file
@@ -0,0 +1,2 @@
|
||||
mysql:
|
||||
enabled: False
|
||||
27
salt/mysql/disabled.sls
Normal file
27
salt/mysql/disabled.sls
Normal file
@@ -0,0 +1,27 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
include:
|
||||
- mysql.sostatus
|
||||
|
||||
so-mysql:
|
||||
docker_container.absent:
|
||||
- force: True
|
||||
|
||||
so-mysql_so-status.disabled:
|
||||
file.comment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-mysql$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
84
salt/mysql/enabled.sls
Normal file
84
salt/mysql/enabled.sls
Normal file
@@ -0,0 +1,84 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
|
||||
|
||||
include:
|
||||
- mysql.config
|
||||
- mysql.sostatus
|
||||
|
||||
{% if MYSQLPASS == None %}
|
||||
|
||||
mysql_password_none:
|
||||
test.configurable_test_state:
|
||||
- changes: False
|
||||
- result: False
|
||||
- comment: "MySQL Password Error - Not Starting MySQL"
|
||||
|
||||
{% else %}
|
||||
|
||||
so-mysql:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }}
|
||||
- hostname: so-mysql
|
||||
- user: socore
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-mysql'].ip }}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
{% if DOCKER.containers['so-mysql'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-mysql'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-mysql'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
- environment:
|
||||
- MYSQL_ROOT_HOST={{ GLOBALS.so_docker_gateway }}
|
||||
- MYSQL_ROOT_PASSWORD=/etc/mypass
|
||||
{% if DOCKER.containers['so-mysql'].extra_env %}
|
||||
{% for XTRAENV in DOCKER.containers['so-mysql'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- binds:
|
||||
- /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro
|
||||
- /opt/so/conf/mysql/etc/mypass:/etc/mypass
|
||||
- /nsm/mysql:/var/lib/mysql:rw
|
||||
- /opt/so/log/mysql:/var/log/mysql:rw
|
||||
{% if DOCKER.containers['so-mysql'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-mysql'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- cap_add:
|
||||
- SYS_NICE
|
||||
- watch:
|
||||
- file: mysqlcnf
|
||||
- file: mysqlpass
|
||||
- require:
|
||||
- file: mysqlcnf
|
||||
- file: mysqlpass
|
||||
{% endif %}
|
||||
|
||||
delete_so-mysql_so-status.disabled:
|
||||
file.uncomment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-mysql$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
32
salt/mysql/etc/my.cnf
Normal file
32
salt/mysql/etc/my.cnf
Normal file
@@ -0,0 +1,32 @@
|
||||
# For advice on how to change settings please see
|
||||
# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html
|
||||
|
||||
[mysqld]
|
||||
#
|
||||
# Remove leading # and set to the amount of RAM for the most important data
|
||||
# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%.
|
||||
# innodb_buffer_pool_size = 128M
|
||||
#
|
||||
# Remove leading # to turn on a very important data integrity option: logging
|
||||
# changes to the binary log between backups.
|
||||
# log_bin
|
||||
#
|
||||
# Remove leading # to set options mainly useful for reporting servers.
|
||||
# The server defaults are faster for transactions and fast SELECTs.
|
||||
# Adjust sizes as needed, experiment to find the optimal values.
|
||||
# join_buffer_size = 128M
|
||||
# sort_buffer_size = 2M
|
||||
# read_rnd_buffer_size = 2M
|
||||
|
||||
host_cache_size=0
|
||||
skip-name-resolve
|
||||
datadir=/var/lib/mysql
|
||||
socket=/var/lib/mysql/mysql.sock
|
||||
secure-file-priv=/var/lib/mysql-files
|
||||
user=socore
|
||||
|
||||
log-error=/var/log/mysql/mysqld.log
|
||||
pid-file=/var/run/mysqld/mysqld.pid
|
||||
|
||||
# Switch back to the native password module so that playbook can connect
|
||||
authentication_policy=mysql_native_password
|
||||
1
salt/mysql/etc/mypass
Normal file
1
salt/mysql/etc/mypass
Normal file
@@ -0,0 +1 @@
|
||||
{{ MYSQLPASS }}
|
||||
14
salt/mysql/init.sls
Normal file
14
salt/mysql/init.sls
Normal file
@@ -0,0 +1,14 @@
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'mysql/map.jinja' import MYSQLMERGED %}
|
||||
|
||||
include:
|
||||
{% if MYSQLMERGED.enabled %}
|
||||
- mysql.enabled
|
||||
{% else %}
|
||||
- mysql.disabled
|
||||
{% endif %}
|
||||
7
salt/mysql/map.jinja
Normal file
7
salt/mysql/map.jinja
Normal file
@@ -0,0 +1,7 @@
|
||||
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
Elastic License 2.0. #}
|
||||
|
||||
{% import_yaml 'mysql/defaults.yaml' as MYSQLDEFAULTS with context %}
|
||||
{% set MYSQLMERGED = salt['pillar.get']('mysql', MYSQLDEFAULTS.mysql, merge=True) %}
|
||||
4
salt/mysql/soc_mysql.yaml
Normal file
4
salt/mysql/soc_mysql.yaml
Normal file
@@ -0,0 +1,4 @@
|
||||
mysql:
|
||||
enabled:
|
||||
description: You can enable or disable MySQL.
|
||||
advanced: True
|
||||
21
salt/mysql/sostatus.sls
Normal file
21
salt/mysql/sostatus.sls
Normal file
@@ -0,0 +1,21 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
append_so-mysql_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-mysql
|
||||
- unless: grep -q so-mysql /opt/so/conf/so-status/so-status.conf
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
12
salt/mysql/tools/sbin/so-mysql-restart
Executable file
12
salt/mysql/tools/sbin/so-mysql-restart
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-restart mysql $1
|
||||
12
salt/mysql/tools/sbin/so-mysql-start
Executable file
12
salt/mysql/tools/sbin/so-mysql-start
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-start mysql $1
|
||||
12
salt/mysql/tools/sbin/so-mysql-stop
Executable file
12
salt/mysql/tools/sbin/so-mysql-stop
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-stop mysql $1
|
||||
@@ -277,11 +277,38 @@ http {
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
location /playbook/ {
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_pass http://{{ GLOBALS.manager }}:3000/playbook/;
|
||||
proxy_read_timeout 90;
|
||||
proxy_connect_timeout 90;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
|
||||
location /soctopus/ {
|
||||
auth_request /auth/sessions/whoami;
|
||||
proxy_pass http://{{ GLOBALS.manager }}:7000/;
|
||||
proxy_read_timeout 300;
|
||||
proxy_connect_timeout 300;
|
||||
proxy_set_header Host $host;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header Proxy "";
|
||||
proxy_set_header X-Forwarded-Proto $scheme;
|
||||
}
|
||||
|
||||
location /kibana/app/soc/ {
|
||||
rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
|
||||
}
|
||||
|
||||
location /kibana/app/soctopus/ {
|
||||
rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
|
||||
}
|
||||
|
||||
location /sensoroniagents/ {
|
||||
if ($http_authorization = "") {
|
||||
|
||||
19
salt/playbook/automation_user_create.sls
Normal file
19
salt/playbook/automation_user_create.sls
Normal file
@@ -0,0 +1,19 @@
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
# This state will create the SecOps Automation user within Playbook
|
||||
|
||||
include:
|
||||
- playbook
|
||||
|
||||
wait_for_playbook:
|
||||
cmd.run:
|
||||
- name: until nc -z {{ GLOBALS.manager }} 3000; do sleep 1; done
|
||||
- timeout: 300
|
||||
|
||||
create_user:
|
||||
cmd.script:
|
||||
- source: salt://playbook/files/automation_user_create.sh
|
||||
- cwd: /root
|
||||
- template: jinja
|
||||
- onchanges:
|
||||
- cmd: wait_for_playbook
|
||||
120
salt/playbook/config.sls
Normal file
120
salt/playbook/config.sls
Normal file
@@ -0,0 +1,120 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
|
||||
{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %}
|
||||
|
||||
|
||||
include:
|
||||
- mysql
|
||||
|
||||
create_playbookdbuser:
|
||||
mysql_user.present:
|
||||
- name: playbookdbuser
|
||||
- password: {{ PLAYBOOKPASS }}
|
||||
- host: "{{ DOCKER.range.split('/')[0] }}/255.255.255.0"
|
||||
- connection_host: {{ GLOBALS.manager }}
|
||||
- connection_port: 3306
|
||||
- connection_user: root
|
||||
- connection_pass: {{ MYSQLPASS }}
|
||||
|
||||
query_playbookdbuser_grants:
|
||||
mysql_query.run:
|
||||
- database: playbook
|
||||
- query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DOCKER.range.split('/')[0] }}/255.255.255.0';"
|
||||
- connection_host: {{ GLOBALS.manager }}
|
||||
- connection_port: 3306
|
||||
- connection_user: root
|
||||
- connection_pass: {{ MYSQLPASS }}
|
||||
|
||||
query_updatwebhooks:
|
||||
mysql_query.run:
|
||||
- database: playbook
|
||||
- query: "update webhooks set url = 'http://{{ GLOBALS.manager_ip}}:7000/playbook/webhook' where project_id = 1"
|
||||
- connection_host: {{ GLOBALS.manager }}
|
||||
- connection_port: 3306
|
||||
- connection_user: root
|
||||
- connection_pass: {{ MYSQLPASS }}
|
||||
|
||||
query_updatename:
|
||||
mysql_query.run:
|
||||
- database: playbook
|
||||
- query: "update custom_fields set name = 'Custom Filter' where id = 21;"
|
||||
- connection_host: {{ GLOBALS.manager }}
|
||||
- connection_port: 3306
|
||||
- connection_user: root
|
||||
- connection_pass: {{ MYSQLPASS }}
|
||||
|
||||
query_updatepluginurls:
|
||||
mysql_query.run:
|
||||
- database: playbook
|
||||
- query: |-
|
||||
update settings set value =
|
||||
"--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
|
||||
project: '1'
|
||||
convert_url: http://{{ GLOBALS.manager }}:7000/playbook/sigmac
|
||||
create_url: http://{{ GLOBALS.manager }}:7000/playbook/play"
|
||||
where id = 43
|
||||
- connection_host: {{ GLOBALS.manager }}
|
||||
- connection_port: 3306
|
||||
- connection_user: root
|
||||
- connection_pass: {{ MYSQLPASS }}
|
||||
|
||||
playbook_sbin:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://playbook/tools/sbin
|
||||
- user: 939
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
|
||||
#playbook_sbin_jinja:
|
||||
# file.recurse:
|
||||
# - name: /usr/sbin
|
||||
# - source: salt://playbook/tools/sbin_jinja
|
||||
# - user: 939
|
||||
# - group: 939
|
||||
# - file_mode: 755
|
||||
# - template: jinja
|
||||
|
||||
playbooklogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/playbook
|
||||
- dir_mode: 775
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
playbookfilesdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/playbook/redmine-files
|
||||
- dir_mode: 775
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
{% if 'idh' in salt['cmd.shell']("ls /opt/so/saltstack/local/pillar/minions/|awk -F'_' {'print $2'}|awk -F'.' {'print $1'}").split() %}
|
||||
idh-plays:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/soctopus/sigma-import
|
||||
- source: salt://idh/plays
|
||||
- makedirs: True
|
||||
cmd.run:
|
||||
- name: so-playbook-import True
|
||||
- onchanges:
|
||||
- file: /opt/so/conf/soctopus/sigma-import
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
14
salt/playbook/db_init.sls
Normal file
14
salt/playbook/db_init.sls
Normal file
@@ -0,0 +1,14 @@
|
||||
|
||||
# This state will import the initial default playbook database.
|
||||
# If there is an existing playbook database, it will be overwritten - no backups are made.
|
||||
|
||||
include:
|
||||
- mysql
|
||||
|
||||
salt://playbook/files/playbook_db_init.sh:
|
||||
cmd.script:
|
||||
- cwd: /root
|
||||
- template: jinja
|
||||
|
||||
'sleep 5':
|
||||
cmd.run
|
||||
2
salt/playbook/defaults.yaml
Normal file
2
salt/playbook/defaults.yaml
Normal file
@@ -0,0 +1,2 @@
|
||||
playbook:
|
||||
enabled: False
|
||||
37
salt/playbook/disabled.sls
Normal file
37
salt/playbook/disabled.sls
Normal file
@@ -0,0 +1,37 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
include:
|
||||
- playbook.sostatus
|
||||
|
||||
so-playbook:
|
||||
docker_container.absent:
|
||||
- force: True
|
||||
|
||||
so-playbook_so-status.disabled:
|
||||
file.comment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-playbook$
|
||||
|
||||
so-playbook-sync_cron:
|
||||
cron.absent:
|
||||
- identifier: so-playbook-sync_cron
|
||||
- user: root
|
||||
|
||||
so-playbook-ruleupdate_cron:
|
||||
cron.absent:
|
||||
- identifier: so-playbook-ruleupdate_cron
|
||||
- user: root
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
93
salt/playbook/enabled.sls
Normal file
@@ -0,0 +1,93 @@

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %}

include:
  - playbook.config
  - playbook.sostatus

{% if PLAYBOOKPASS == None %}

playbook_password_none:
  test.configurable_test_state:
    - changes: False
    - result: False
    - comment: "Playbook MySQL Password Error - Not Starting Playbook"

{% else %}

so-playbook:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-playbook:{{ GLOBALS.so_version }}
    - hostname: playbook
    - name: so-playbook
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-playbook'].ip }}
    - binds:
      - /opt/so/conf/playbook/redmine-files:/usr/src/redmine/files:rw
      - /opt/so/log/playbook:/playbook/log:rw
      {% if DOCKER.containers['so-playbook'].custom_bind_mounts %}
        {% for BIND in DOCKER.containers['so-playbook'].custom_bind_mounts %}
      - {{ BIND }}
        {% endfor %}
      {% endif %}
    - extra_hosts:
      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
      {% if DOCKER.containers['so-playbook'].extra_hosts %}
        {% for XTRAHOST in DOCKER.containers['so-playbook'].extra_hosts %}
      - {{ XTRAHOST }}
        {% endfor %}
      {% endif %}
    - environment:
      - REDMINE_DB_MYSQL={{ GLOBALS.manager }}
      - REDMINE_DB_DATABASE=playbook
      - REDMINE_DB_USERNAME=playbookdbuser
      - REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }}
      {% if DOCKER.containers['so-playbook'].extra_env %}
        {% for XTRAENV in DOCKER.containers['so-playbook'].extra_env %}
      - {{ XTRAENV }}
        {% endfor %}
      {% endif %}
    - port_bindings:
      {% for BINDING in DOCKER.containers['so-playbook'].port_bindings %}
      - {{ BINDING }}
      {% endfor %}

delete_so-playbook_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-playbook$

so-playbook-sync_cron:
  cron.present:
    - name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
    - identifier: so-playbook-sync_cron
    - user: root
    - minute: '*/5'

so-playbook-ruleupdate_cron:
  cron.present:
    - name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
    - identifier: so-playbook-ruleupdate_cron
    - user: root
    - minute: '1'
    - hour: '6'

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
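
Note: the two cron.present states above would manage root crontab entries along these lines. This is only a sketch of the rendered result; the exact formatting, including the identifier comments, is handled by Salt's cron module.

    # SALT_CRON_IDENTIFIER:so-playbook-sync_cron
    */5 * * * * /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
    # SALT_CRON_IDENTIFIER:so-playbook-ruleupdate_cron
    1 6 * * * /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1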
49
salt/playbook/files/automation_user_create.sh
Normal file
@@ -0,0 +1,49 @@

#!/bin/bash
# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) -%}
# {%- set automation_pass = salt['pillar.get']('secrets:playbook_automation', None) %}

local_salt_dir=/opt/so/saltstack/local

try_count=6
interval=10

while [[ $try_count -le 6 ]]; do
  if docker top "so-playbook" &>/dev/null; then
    automation_group=6

    # Create user and retrieve api_key and user_id from response
    mapfile -t automation_res < <(
      curl -s --location --request POST 'http://127.0.0.1:3000/playbook/users.json' --user "admin:{{ admin_pass }}" --header 'Content-Type: application/json' --data '{
        "user" : {
          "login" : "automation",
          "password": "{{ automation_pass }}",
          "firstname": "SecOps",
          "lastname": "Automation",
          "mail": "automation2@localhost.local"
        }
      }' | jq -r '.user.api_key, .user.id'
    )

    automation_api_key=${automation_res[0]}
    automation_user_id=${automation_res[1]}

    # Add user_id from newly created user to Automation group
    curl -s --location --request POST "http://127.0.0.1:3000/playbook/groups/${automation_group}/users.json" \
      --user "admin:{{ admin_pass }}" \
      --header 'Content-Type: application/json' \
      --data "{
        \"user_id\" : ${automation_user_id}
      }"

    # Update the Automation API key in the secrets pillar
    so-yaml.py remove $local_salt_dir/pillar/secrets.sls secrets.playbook_automation_api_key
    printf '%s\n'\
    " playbook_automation_api_key: $automation_api_key" >> $local_salt_dir/pillar/secrets.sls
    exit 0
  fi
  ((try_count++))
  sleep "${interval}s"
done

# Timeout exceeded, exit with non-zero exit code
exit 1
17
salt/playbook/files/playbook_db_init.sh
Normal file
@@ -0,0 +1,17 @@

#!/bin/bash
# {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) %}
. /usr/sbin/so-common

default_salt_dir=/opt/so/saltstack/default

# Generate salt + hash for admin user
admin_salt=$(get_random_value 32)
admin_stage1_hash=$(echo -n '{{ admin_pass }}' | sha1sum | awk '{print $1}')
admin_hash=$(echo -n "${admin_salt}${admin_stage1_hash}" | sha1sum | awk '{print $1}')
sed -i "s/ADMIN_HASH/${admin_hash}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql
sed -i "s/ADMIN_SALT/${admin_salt}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql

# Copy file to destination + execute SQL
docker cp $default_salt_dir/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql
docker exec so-mysql /bin/bash -c "/usr/bin/mysql -b -uroot -p{{MYSQLPASS}} < /tmp/playbook_db_init.sql"
1788
salt/playbook/files/playbook_db_init.sql
Normal file
File diff suppressed because one or more lines are too long
14
salt/playbook/init.sls
Normal file
@@ -0,0 +1,14 @@

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'playbook/map.jinja' import PLAYBOOKMERGED %}

include:
{% if PLAYBOOKMERGED.enabled %}
  - playbook.enabled
{% else %}
  - playbook.disabled
{% endif %}
2
salt/playbook/map.jinja
Normal file
@@ -0,0 +1,2 @@

{% import_yaml 'playbook/defaults.yaml' as PLAYBOOKDEFAULTS %}
{% set PLAYBOOKMERGED = salt['pillar.get']('playbook', PLAYBOOKDEFAULTS.playbook, merge=True) %}
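
Note: with defaults.yaml and map.jinja combined, Playbook stays disabled until a pillar value overrides the default. A minimal sketch of such an override, assuming the usual local pillar layout (the file path shown is hypothetical):

    # Hypothetical local pillar file, e.g. /opt/so/saltstack/local/pillar/minions/<manager>.sls;
    # values here are merged over salt/playbook/defaults.yaml by playbook/map.jinja.
    playbook:
      enabled: True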
4
salt/playbook/soc_playbook.yaml
Normal file
@@ -0,0 +1,4 @@

playbook:
  enabled:
    description: You can enable or disable Playbook.
    helpLink: playbook.html
21
salt/playbook/sostatus.sls
Normal file
@@ -0,0 +1,21 @@

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

append_so-playbook_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-playbook
    - unless: grep -q so-playbook /opt/so/conf/so-status/so-status.conf

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
14
salt/playbook/tools/sbin/so-playbook-import
Executable file
@@ -0,0 +1,14 @@

#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

ENABLEPLAY=${1:-False}

docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))"
22
salt/playbook/tools/sbin/so-playbook-reset
Executable file
@@ -0,0 +1,22 @@

#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

salt-call state.apply playbook.db_init,playbook queue=True

/usr/sbin/so-soctopus-restart

salt-call state.apply playbook,playbook.automation_user_create queue=True

/usr/sbin/so-soctopus-restart

echo "Importing Plays - NOTE: this will continue after installation finishes and could take an hour or more. Rebooting while the import is in progress will delay playbook imports."
sleep 5
so-playbook-ruleupdate >> /root/setup_playbook_rule_update.log 2>&1 &
12
salt/playbook/tools/sbin/so-playbook-restart
Executable file
@@ -0,0 +1,12 @@

#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

/usr/sbin/so-restart playbook $1
12
salt/playbook/tools/sbin/so-playbook-ruleupdate
Executable file
@@ -0,0 +1,12 @@

#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

docker exec so-soctopus python3 playbook_bulk-update.py
29
salt/playbook/tools/sbin/so-playbook-sigma-refresh
Executable file
@@ -0,0 +1,29 @@

#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

if ! [ -f /opt/so/state/playbook_regen_plays ] || [ "$1" = "--force" ]; then

  echo "Refreshing Sigma & regenerating plays... "

  # Regenerate ElastAlert & update Plays
  docker exec so-soctopus python3 playbook_play-update.py

  # Delete current Elastalert Rules
  rm /opt/so/rules/elastalert/playbook/*.yaml

  # Regenerate Elastalert Rules
  so-playbook-sync

  # Create state file
  touch /opt/so/state/playbook_regen_plays
else
  printf "\nState file found, exiting...\nRerun with --force to override.\n"
fi
12
salt/playbook/tools/sbin/so-playbook-start
Executable file
@@ -0,0 +1,12 @@

#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

/usr/sbin/so-start playbook $1
12
salt/playbook/tools/sbin/so-playbook-stop
Executable file
@@ -0,0 +1,12 @@

#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

/usr/sbin/so-stop playbook $1
16
salt/playbook/tools/sbin/so-playbook-sync
Executable file
@@ -0,0 +1,16 @@

#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

# Check to see if we are already running
NUM_RUNNING=$(pgrep -cf "/bin/bash /usr/sbin/so-playbook-sync")
[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING Playbook sync processes running...exiting." && exit 0

docker exec so-soctopus python3 playbook_play-sync.py
@@ -92,7 +92,6 @@ soc:
|
||||
- http_request.headers.x-real-ip
|
||||
- identity_id
|
||||
- http_request.headers.user-agent
|
||||
- event.dataset
|
||||
'::conn':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -103,7 +102,6 @@ soc:
|
||||
- network.protocol
|
||||
- log.id.uid
|
||||
- network.community_id
|
||||
- event.dataset
|
||||
'::dce_rpc':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -114,7 +112,6 @@ soc:
|
||||
- dce_rpc.named_pipe
|
||||
- dce_rpc.operation
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::dhcp':
|
||||
- soc_timestamp
|
||||
- client.address
|
||||
@@ -123,7 +120,6 @@ soc:
|
||||
- host.hostname
|
||||
- dhcp.message_types
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::dnp3':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -132,7 +128,6 @@ soc:
|
||||
- destination.port
|
||||
- dnp3.fc_reply
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::dnp3_control':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -142,7 +137,6 @@ soc:
|
||||
- dnp3.function_code
|
||||
- dnp3.block_type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::dnp3_objects':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -152,7 +146,6 @@ soc:
|
||||
- dnp3.function_code
|
||||
- dnp3.object_type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::dns':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -165,7 +158,6 @@ soc:
|
||||
- dns.response.code_name
|
||||
- log.id.uid
|
||||
- network.community_id
|
||||
- event.dataset
|
||||
'::dpd':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -176,7 +168,6 @@ soc:
|
||||
- observer.analyser
|
||||
- error.reason
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::file':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -187,7 +178,6 @@ soc:
|
||||
- file.bytes.total
|
||||
- log.id.fuid
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::ftp':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -200,7 +190,6 @@ soc:
|
||||
- ftp.reply_code
|
||||
- file.size
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::http':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -215,7 +204,6 @@ soc:
|
||||
- http.response.body.length
|
||||
- log.id.uid
|
||||
- network.community_id
|
||||
- event.dataset
|
||||
'::intel':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -226,7 +214,6 @@ soc:
|
||||
- intel.indicator_type
|
||||
- intel.seen_where
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::irc':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -239,7 +226,6 @@ soc:
|
||||
- irc.command.value
|
||||
- irc.command.info
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::kerberos':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -250,7 +236,6 @@ soc:
|
||||
- kerberos.service
|
||||
- kerberos.request_type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::modbus':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -259,7 +244,6 @@ soc:
|
||||
- destination.port
|
||||
- modbus.function
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::mysql':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -271,7 +255,6 @@ soc:
|
||||
- mysql.success
|
||||
- mysql.response
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::notice':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -283,7 +266,6 @@ soc:
|
||||
- log.id.fuid
|
||||
- log.id.uid
|
||||
- network.community_id
|
||||
- event.dataset
|
||||
'::ntlm':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -296,7 +278,6 @@ soc:
|
||||
- ntlm.server.nb.name
|
||||
- ntlm.server.tree.name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::pe':
|
||||
- soc_timestamp
|
||||
- file.is_64bit
|
||||
@@ -305,7 +286,6 @@ soc:
|
||||
- file.os
|
||||
- file.subsystem
|
||||
- log.id.fuid
|
||||
- event.dataset
|
||||
'::radius':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -317,7 +297,6 @@ soc:
|
||||
- radius.framed_address
|
||||
- radius.reply_message
|
||||
- radius.result
|
||||
- event.dataset
|
||||
'::rdp':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -333,7 +312,6 @@ soc:
|
||||
- rdp.result
|
||||
- rdp.security_protocol
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::rfb':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -345,7 +323,6 @@ soc:
|
||||
- rfb.share_flag
|
||||
- rfb.desktop.name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::signatures':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -359,7 +336,6 @@ soc:
|
||||
- signature_count
|
||||
- host.count
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::sip':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -377,7 +353,6 @@ soc:
|
||||
- sip.user_agent
|
||||
- sip.status_code
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::smb_files':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -391,7 +366,6 @@ soc:
|
||||
- file.size
|
||||
- file.prev_name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::smb_mapping':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -402,7 +376,6 @@ soc:
|
||||
- smb.service
|
||||
- smb.share_type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::smtp':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -415,7 +388,6 @@ soc:
|
||||
- smtp.useragent
|
||||
- log.id.uid
|
||||
- network.community_id
|
||||
- event.dataset
|
||||
'::snmp':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -425,7 +397,6 @@ soc:
|
||||
- snmp.community
|
||||
- snmp.version
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::socks':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -437,13 +408,11 @@ soc:
|
||||
- socks.request.port
|
||||
- socks.status
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::software':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
- software.name
|
||||
- software.type
|
||||
- event.dataset
|
||||
'::ssh':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -456,7 +425,6 @@ soc:
|
||||
- ssh.client
|
||||
- ssh.server
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::ssl':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -468,7 +436,6 @@ soc:
|
||||
- ssl.validation_status
|
||||
- ssl.version
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
':zeek:syslog':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -479,7 +446,6 @@ soc:
|
||||
- network.protocol
|
||||
- syslog.severity
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::tunnels':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -489,7 +455,6 @@ soc:
|
||||
- tunnel_type
|
||||
- action
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::weird':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -498,7 +463,6 @@ soc:
|
||||
- destination.port
|
||||
- weird.name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::x509':
|
||||
- soc_timestamp
|
||||
- x509.certificate.subject
|
||||
@@ -506,7 +470,6 @@ soc:
|
||||
- x509.certificate.key.length
|
||||
- x509.certificate.issuer
|
||||
- log.id.fuid
|
||||
- event.dataset
|
||||
'::firewall':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -518,7 +481,6 @@ soc:
|
||||
- observer.ingress.interface.name
|
||||
- event.action
|
||||
- network.community_id
|
||||
- event.dataset
|
||||
':pfsense:':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -530,7 +492,6 @@ soc:
|
||||
- observer.ingress.interface.name
|
||||
- event.action
|
||||
- network.community_id
|
||||
- event.dataset
|
||||
':osquery:':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -538,9 +499,9 @@ soc:
|
||||
- destination.ip
|
||||
- destination.port
|
||||
- source.hostname
|
||||
- event.dataset
|
||||
- process.executable
|
||||
- user.name
|
||||
- event.dataset
|
||||
':strelka:file':
|
||||
- soc_timestamp
|
||||
- file.name
|
||||
@@ -549,7 +510,6 @@ soc:
|
||||
- file.source
|
||||
- file.mime_type
|
||||
- log.id.fuid
|
||||
- event.dataset
|
||||
':suricata:':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -561,11 +521,9 @@ soc:
|
||||
- event.severity_label
|
||||
- log.id.uid
|
||||
- network.community_id
|
||||
- event.dataset
|
||||
':windows_eventlog:':
|
||||
- soc_timestamp
|
||||
- user.name
|
||||
- event.dataset
|
||||
':elasticsearch:':
|
||||
- soc_timestamp
|
||||
- agent.name
|
||||
@@ -587,7 +545,6 @@ soc:
|
||||
- real_message
|
||||
- syslog.priority
|
||||
- syslog.application
|
||||
- event.dataset
|
||||
':aws:':
|
||||
- soc_timestamp
|
||||
- aws.cloudtrail.event_category
|
||||
@@ -599,7 +556,6 @@ soc:
|
||||
- user.name
|
||||
- source.ip
|
||||
- source.geo.region_iso_code
|
||||
- event.dataset
|
||||
':squid:':
|
||||
- soc_timestamp
|
||||
- url.original
|
||||
@@ -607,7 +563,6 @@ soc:
|
||||
- destination.geo.country_iso_code
|
||||
- user.name
|
||||
- source.ip
|
||||
- event.dataset
|
||||
'::sysmon_operational':
|
||||
- soc_timestamp
|
||||
- event.action
|
||||
@@ -615,7 +570,6 @@ soc:
|
||||
- user.name
|
||||
- process.executable
|
||||
- process.pid
|
||||
- event.dataset
|
||||
'::network_connection':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -623,50 +577,44 @@ soc:
|
||||
- destination.ip
|
||||
- destination.port
|
||||
- source.hostname
|
||||
- event.dataset
|
||||
- process.executable
|
||||
- user.name
|
||||
- event.dataset
|
||||
'::process_terminated':
|
||||
- soc_timestamp
|
||||
- process.executable
|
||||
- process.pid
|
||||
- winlog.computer_name
|
||||
- event.dataset
|
||||
'::file_create':
|
||||
- soc_timestamp
|
||||
- file.target
|
||||
- process.executable
|
||||
- process.pid
|
||||
- winlog.computer_name
|
||||
- event.dataset
|
||||
'::registry_value_set':
|
||||
- soc_timestamp
|
||||
- winlog.event_data.TargetObject
|
||||
- process.executable
|
||||
- process.pid
|
||||
- winlog.computer_name
|
||||
- event.dataset
|
||||
'::process_creation':
|
||||
- soc_timestamp
|
||||
- process.command_line
|
||||
- process.pid
|
||||
- process.parent.executable
|
||||
- process.working_directory
|
||||
- event.dataset
|
||||
'::registry_create_delete':
|
||||
- soc_timestamp
|
||||
- winlog.event_data.TargetObject
|
||||
- process.executable
|
||||
- process.pid
|
||||
- winlog.computer_name
|
||||
- event.dataset
|
||||
'::dns_query':
|
||||
- soc_timestamp
|
||||
- dns.query.name
|
||||
- dns.answers.name
|
||||
- process.executable
|
||||
- winlog.computer_name
|
||||
- event.dataset
|
||||
'::file_create_stream_hash':
|
||||
- soc_timestamp
|
||||
- file.target
|
||||
@@ -675,7 +623,6 @@ soc:
|
||||
- process.executable
|
||||
- process.pid
|
||||
- winlog.computer_name
|
||||
- event.dataset
|
||||
'::bacnet':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -685,7 +632,6 @@ soc:
|
||||
- bacnet.bclv.function
|
||||
- bacnet.result.code
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::bacnet_discovery':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -695,7 +641,6 @@ soc:
|
||||
- bacnet.vendor
|
||||
- bacnet.pdu.service
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::bacnet_property':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -705,7 +650,6 @@ soc:
|
||||
- bacnet.property
|
||||
- bacnet.pdu.service
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::bsap_ip_header':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -715,14 +659,12 @@ soc:
|
||||
- bsap.message.type
|
||||
- bsap.number.messages
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::bsap_ip_rdb':
|
||||
- soc_timestamp
|
||||
- bsap.application.function
|
||||
- bsap.application.sub.function
|
||||
- bsap.vector.variables
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::bsap_serial_header':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -733,13 +675,11 @@ soc:
|
||||
- bsap.destination.function
|
||||
- bsap.message.type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::bsap_serial_rdb':
|
||||
- soc_timestamp
|
||||
- bsap.rdb.function
|
||||
- bsap.vector.variables
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::cip':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -759,7 +699,6 @@ soc:
|
||||
- cip.device.type.name
|
||||
- cip.vendor.name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::cip_io':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -769,7 +708,6 @@ soc:
|
||||
- cip.connection.id
|
||||
- cip.io.data
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::cotp':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -778,7 +716,6 @@ soc:
|
||||
- destination.port
|
||||
- cotp.pdu.name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::ecat_arp_info':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -786,7 +723,6 @@ soc:
|
||||
- source.mac
|
||||
- destination.mac
|
||||
- ecat.arp.type
|
||||
- event.dataset
|
||||
'::ecat_aoe_info':
|
||||
- soc_timestamp
|
||||
- source.mac
|
||||
@@ -794,7 +730,6 @@ soc:
|
||||
- destination.mac
|
||||
- destination.port
|
||||
- ecat.command
|
||||
- event.dataset
|
||||
'::ecat_coe_info':
|
||||
- soc_timestamp
|
||||
- ecat.message.number
|
||||
@@ -802,7 +737,6 @@ soc:
|
||||
- ecat.request.response.type
|
||||
- ecat.index
|
||||
- ecat.sub.index
|
||||
- event.dataset
|
||||
'::ecat_dev_info':
|
||||
- soc_timestamp
|
||||
- ecat.device.type
|
||||
@@ -810,20 +744,17 @@ soc:
|
||||
- ecat.ram.size
|
||||
- ecat.revision
|
||||
- ecat.slave.address
|
||||
- event.dataset
|
||||
'::ecat_log_address':
|
||||
- soc_timestamp
|
||||
- source.mac
|
||||
- destination.mac
|
||||
- ecat.command
|
||||
- event.dataset
|
||||
'::ecat_registers':
|
||||
- soc_timestamp
|
||||
- source.mac
|
||||
- destination.mac
|
||||
- ecat.command
|
||||
- ecat.register.type
|
||||
- event.dataset
|
||||
'::enip':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -842,7 +773,6 @@ soc:
|
||||
- destination.port
|
||||
- modbus.function
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -852,7 +782,6 @@ soc:
|
||||
- opcua.identifier_string
|
||||
- opcua.message_type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_activate_session':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -863,7 +792,6 @@ soc:
|
||||
- opcua.identifier_string
|
||||
- opcua.user_name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_activate_session_diagnostic_info':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -873,7 +801,6 @@ soc:
|
||||
- opcua.activate_session_diag_info_link_id
|
||||
- opcua.diag_info_link_id
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_activate_session_locale_id':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -883,7 +810,6 @@ soc:
|
||||
- opcua.local_id
|
||||
- opcua.locale_link_id
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_browse':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -893,7 +819,6 @@ soc:
|
||||
- opcua.link_id
|
||||
- opcua.service_type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_browse_description':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -901,7 +826,6 @@ soc:
|
||||
- destination.ip
|
||||
- destination.port
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_browse_response_references':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -911,7 +835,6 @@ soc:
|
||||
- opcua.node_class
|
||||
- opcua.display_name_text
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_browse_result':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -920,7 +843,6 @@ soc:
|
||||
- destination.port
|
||||
- opcua.response_link_id
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_create_session':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -929,7 +851,6 @@ soc:
|
||||
- destination.port
|
||||
- opcua.link_id
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_create_session_endpoints':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -939,7 +860,6 @@ soc:
|
||||
- opcua.endpoint_link_id
|
||||
- opcua.endpoint_url
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_create_session_user_token':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -948,7 +868,6 @@ soc:
|
||||
- destination.port
|
||||
- opcua.user_token_link_id
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_create_subscription':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -957,7 +876,6 @@ soc:
|
||||
- destination.port
|
||||
- opcua.link_id
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_get_endpoints':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -967,7 +885,6 @@ soc:
|
||||
- opcua.endpoint_url
|
||||
- opcua.link_id
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_get_endpoints_description':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -977,7 +894,6 @@ soc:
|
||||
- opcua.endpoint_description_link_id
|
||||
- opcua.endpoint_uri
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_get_endpoints_user_token':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -987,7 +903,6 @@ soc:
|
||||
- opcua.user_token_link_id
|
||||
- opcua.user_token_type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_read':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -997,7 +912,6 @@ soc:
|
||||
- opcua.link_id
|
||||
- opcua.read_results_link_id
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::opcua_binary_status_code_detail':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -1007,7 +921,6 @@ soc:
|
||||
- opcua.info_type_string
|
||||
- opcua.source_string
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::profinet':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -1017,7 +930,6 @@ soc:
|
||||
- profinet.index
|
||||
- profinet.operation_type
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::profinet_dce_rpc':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -1026,7 +938,6 @@ soc:
|
||||
- destination.port
|
||||
- profinet.operation
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::s7comm':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -1036,7 +947,6 @@ soc:
|
||||
- s7.ros.control.name
|
||||
- s7.function.name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::s7comm_plus':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -1046,7 +956,6 @@ soc:
|
||||
- s7.opcode.name
|
||||
- s7.version
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::s7comm_read_szl':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -1056,7 +965,6 @@ soc:
|
||||
- s7.szl_id_name
|
||||
- s7.return_code_name
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::s7comm_upload_download':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -1066,7 +974,6 @@ soc:
|
||||
- s7.ros.control.name
|
||||
- s7.function_code
|
||||
- log.id.uid
|
||||
- event.dataset
|
||||
'::tds':
|
||||
- soc_timestamp
|
||||
- source.ip
|
||||
@@ -1157,15 +1064,6 @@ soc:
|
||||
- event.action
|
||||
- event.outcome
|
||||
- event.dataset
|
||||
':system:':
|
||||
- soc_timestamp
|
||||
- process.name
|
||||
- process.pid
|
||||
- user.effective.name
|
||||
- user.name
|
||||
- system.auth.sudo.command
|
||||
- event.dataset
|
||||
- message
|
||||
server:
|
||||
bindAddress: 0.0.0.0:9822
|
||||
baseUrl: /
|
||||
@@ -1181,15 +1079,11 @@ soc:
|
||||
hostUrl:
|
||||
elastalertengine:
|
||||
allowRegex: ''
|
||||
autoUpdateEnabled: true
|
||||
autoUpdateEnabled: false
|
||||
communityRulesImportFrequencySeconds: 86400
|
||||
denyRegex: ''
|
||||
elastAlertRulesFolder: /opt/sensoroni/elastalert
|
||||
rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint
|
||||
rulesRepos:
|
||||
- repo: https://github.com/Security-Onion-Solutions/securityonion-resources
|
||||
license: DRL
|
||||
folder: sigma/stable
|
||||
sigmaRulePackages:
|
||||
- core
|
||||
- emerging_threats_addon
|
||||
@@ -1237,10 +1131,9 @@ soc:
|
||||
- rbac/users_roles
|
||||
strelkaengine:
|
||||
allowRegex: ''
|
||||
autoUpdateEnabled: true
|
||||
communityRulesImportFrequencySeconds: 86400
|
||||
autoUpdateEnabled: false
|
||||
compileYaraPythonScriptPath: /opt/so/conf/strelka/compile_yara.py
|
||||
denyRegex: ''
|
||||
denyRegex: '.*'
|
||||
reposFolder: /opt/sensoroni/yara/repos
|
||||
rulesRepos:
|
||||
- repo: https://github.com/Security-Onion-Solutions/securityonion-yara
|
||||
@@ -1248,10 +1141,8 @@ soc:
|
||||
yaraRulesFolder: /opt/sensoroni/yara/rules
|
||||
suricataengine:
|
||||
allowRegex: ''
|
||||
autoUpdateEnabled: true
|
||||
communityRulesImportFrequencySeconds: 86400
|
||||
communityRulesFile: /nsm/rules/suricata/emerging-all.rules
|
||||
denyRegex: ''
|
||||
denyRegex: '.*'
|
||||
rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint
|
||||
client:
|
||||
enableReverseLookup: false
|
||||
@@ -1263,7 +1154,7 @@ soc:
|
||||
tipTimeoutMs: 6000
|
||||
cacheExpirationMs: 300000
|
||||
casesEnabled: true
|
||||
detectionsEnabled: true
|
||||
detectionsEnabled: false
|
||||
inactiveTools: ['toolUnused']
|
||||
tools:
|
||||
- name: toolKibana
|
||||
@@ -1291,6 +1182,11 @@ soc:
|
||||
icon: fa-external-link-alt
|
||||
target: so-cyberchef
|
||||
link: /cyberchef/
|
||||
- name: toolPlaybook
|
||||
description: toolPlaybookHelp
|
||||
icon: fa-external-link-alt
|
||||
target: so-playbook
|
||||
link: /playbook/projects/detection-playbooks/issues/
|
||||
- name: toolNavigator
|
||||
description: toolNavigatorHelp
|
||||
icon: fa-external-link-alt
|
||||
@@ -1982,9 +1878,8 @@ soc:
|
||||
default:
|
||||
- so_detection.title
|
||||
- so_detection.isEnabled
|
||||
- so_detection.severity
|
||||
- so_detection.language
|
||||
- so_detection.ruleset
|
||||
- so_detection.severity
|
||||
queries:
|
||||
- name: "All Detections"
|
||||
query: "_id:*"
|
||||
|
||||
@@ -41,6 +41,10 @@
{% do SOCMERGED.config.server.modules.strelkaengine.update({'autoUpdateEnabled': false}) %}
{% endif %}

{% if pillar.manager.playbook == 0 %}
  {% do SOCMERGED.config.server.client.inactiveTools.append('toolPlaybook') %}
{% endif %}

{% set standard_actions = SOCMERGED.config.pop('actions') %}

{% if pillar.global.endgamehost != '' %}
88
salt/soctopus/config.sls
Normal file
@@ -0,0 +1,88 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
include:
|
||||
- nginx.config
|
||||
|
||||
soctopusdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/soctopus/sigma-import
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
soctopus-sync:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/soctopus/templates
|
||||
- source: salt://soctopus/files/templates
|
||||
- user: 939
|
||||
- group: 939
|
||||
- template: jinja
|
||||
- defaults:
|
||||
GLOBALS: {{ GLOBALS }}
|
||||
|
||||
soctopusconf:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/soctopus/SOCtopus.conf
|
||||
- source: salt://soctopus/files/SOCtopus.conf
|
||||
- user: 939
|
||||
- group: 939
|
||||
- mode: 600
|
||||
- template: jinja
|
||||
- show_changes: False
|
||||
- defaults:
|
||||
GLOBALS: {{ GLOBALS }}
|
||||
|
||||
soctopuslogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/soctopus
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
playbookrulesdir:
|
||||
file.directory:
|
||||
- name: /opt/so/rules/elastalert/playbook
|
||||
- user: 939
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
playbookrulessync:
|
||||
file.recurse:
|
||||
- name: /opt/so/rules/elastalert/playbook
|
||||
- source: salt://soctopus/files/templates
|
||||
- user: 939
|
||||
- group: 939
|
||||
- template: jinja
|
||||
- defaults:
|
||||
GLOBALS: {{ GLOBALS }}
|
||||
|
||||
soctopus_sbin:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://soctopus/tools/sbin
|
||||
- user: 939
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
|
||||
#soctopus_sbin_jinja:
|
||||
# file.recurse:
|
||||
# - name: /usr/sbin
|
||||
# - source: salt://soctopus/tools/sbin_jinja
|
||||
# - user: 939
|
||||
# - group: 939
|
||||
# - file_mode: 755
|
||||
# - template: jinja
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
2
salt/soctopus/defaults.yaml
Normal file
@@ -0,0 +1,2 @@

soctopus:
  enabled: False
27
salt/soctopus/disabled.sls
Normal file
@@ -0,0 +1,27 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
include:
|
||||
- soctopus.sostatus
|
||||
|
||||
so-soctopus:
|
||||
docker_container.absent:
|
||||
- force: True
|
||||
|
||||
so-soctopus_so-status.disabled:
|
||||
file.comment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-soctopus$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
72
salt/soctopus/enabled.sls
Normal file
@@ -0,0 +1,72 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
|
||||
include:
|
||||
- soctopus.config
|
||||
- soctopus.sostatus
|
||||
|
||||
so-soctopus:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-soctopus:{{ GLOBALS.so_version }}
|
||||
- hostname: soctopus
|
||||
- name: so-soctopus
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-soctopus'].ip }}
|
||||
- binds:
|
||||
- /opt/so/conf/soctopus/SOCtopus.conf:/SOCtopus/SOCtopus.conf:ro
|
||||
- /opt/so/log/soctopus/:/var/log/SOCtopus/:rw
|
||||
- /opt/so/rules/elastalert/playbook:/etc/playbook-rules:rw
|
||||
- /opt/so/conf/navigator/layers/:/etc/playbook/:rw
|
||||
- /opt/so/conf/soctopus/sigma-import/:/SOCtopus/sigma-import/:rw
|
||||
{% if GLOBALS.airgap %}
|
||||
- /nsm/repo/rules/sigma:/soctopus/sigma
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-soctopus'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-soctopus'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-soctopus'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
- extra_hosts:
|
||||
- {{GLOBALS.url_base}}:{{GLOBALS.manager_ip}}
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
{% if DOCKER.containers['so-soctopus'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-soctopus'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-soctopus'].extra_env %}
|
||||
- environment:
|
||||
{% for XTRAENV in DOCKER.containers['so-soctopus'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- watch:
|
||||
- file: /opt/so/conf/soctopus/SOCtopus.conf
|
||||
- require:
|
||||
- file: soctopusconf
|
||||
- file: navigatordefaultlayer
|
||||
|
||||
delete_so-soctopus_so-status.disabled:
|
||||
file.uncomment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-soctopus$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
77
salt/soctopus/files/SOCtopus.conf
Normal file
@@ -0,0 +1,77 @@
|
||||
{%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %}
|
||||
{%- set THEHIVEURL = salt['pillar.get']('global:hiveurl', '') %}
|
||||
{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %}
|
||||
{%- set PLAYBOOK_KEY = salt['pillar.get']('secrets:playbook_automation_api_key', '') %}
|
||||
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
|
||||
{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
|
||||
|
||||
[es]
|
||||
es_url = https://{{ GLOBALS.manager_ip }}:9200
|
||||
es_ip = {{ GLOBALS.manager_ip }}
|
||||
es_user = {{ ES_USER }}
|
||||
es_pass = "{{ ES_PASS }}"
|
||||
es_index_pattern = so-*
|
||||
es_verifycert = no
|
||||
|
||||
[cortex]
|
||||
auto_analyze_alerts = no
|
||||
cortex_url = https://{{THEHIVEURL}}/cortex/
|
||||
cortex_key = {{ CORTEXKEY }}
|
||||
supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS
|
||||
|
||||
[fir]
|
||||
fir_url = YOURFIRURL
|
||||
fir_token = YOURFIRTOKEN
|
||||
fir_actor = 3
|
||||
fir_category = 3
|
||||
fir_confidentiality = 1
|
||||
fir_detection = 2
|
||||
fir_plan = 8
|
||||
fir_severity = 4
|
||||
fir_verifycert = no
|
||||
|
||||
[grr]
|
||||
grr_url = YOURGRRURL
|
||||
grr_user = YOURGRRUSER
|
||||
grr_pass = YOURGRRPASS
|
||||
|
||||
[hive]
|
||||
hive_url = https://{{THEHIVEURL}}/thehive/
|
||||
hive_key = {{ HIVEKEY }}
|
||||
hive_tlp = 3
|
||||
hive_verifycert = no
|
||||
|
||||
[misp]
|
||||
misp_url = YOURMISPURL
|
||||
misp_key = YOURMISPKEY
|
||||
misp_verifycert = no
|
||||
distrib = 0
|
||||
threat = 4
|
||||
analysis = 0
|
||||
|
||||
[rtir]
|
||||
rtir_url = YOURRTIRURL
|
||||
rtir_api = REST/1.0/
|
||||
rtir_user = YOURRTIRUSER
|
||||
rtir_pass = YOURRTIRPASS
|
||||
rtir_queue = Incidents
|
||||
rtir_creator = root
|
||||
rtir_verifycert = no
|
||||
|
||||
[slack]
|
||||
slack_url = YOURSLACKWORKSPACE
|
||||
slack_webhook = YOURSLACKWEBHOOK
|
||||
|
||||
[soc]
|
||||
soc_url = http://{{ GLOBALS.manager }}:9822
|
||||
|
||||
[playbook]
|
||||
playbook_url = http://{{ GLOBALS.manager }}:3000/playbook
|
||||
playbook_ext_url = https://{{ GLOBALS.url_base }}/playbook
|
||||
playbook_key = {{ PLAYBOOK_KEY }}
|
||||
playbook_verifycert = no
|
||||
playbook_unit_test_index = playbook-testing
|
||||
playbook_rulesets = {{ salt['pillar.get']('soctopus:playbook:rulesets')|join(",") }}
|
||||
|
||||
[log]
|
||||
logfile = /var/log/SOCtopus/soctopus.log
|
||||
5
salt/soctopus/files/templates/es-generic.template
Normal file
@@ -0,0 +1,5 @@

alert: modules.so.playbook-es.PlaybookESAlerter
elasticsearch_host: "{{ GLOBALS.manager_ip }}:9200"
play_title: ""
play_url: "https://{{ GLOBALS.manager_ip }}/playbook/issues/6000"
sigma_level: ""
22
salt/soctopus/files/templates/generic.template
Normal file
@@ -0,0 +1,22 @@
|
||||
alert:
|
||||
- "modules.so.playbook-es.PlaybookESAlerter"
|
||||
|
||||
elasticsearch_host: "{{ GLOBALS.url_base }}:9200"
|
||||
play_title: ""
|
||||
play_id: ""
|
||||
event.module: "playbook"
|
||||
event.dataset: "playbook.alert"
|
||||
event.severity:
|
||||
rule.category:
|
||||
play_url: "https://{{ GLOBALS.url_base }}/playbook/issues/6000"
|
||||
kibana_pivot: "https://{{ GLOBALS.url_base }}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{[_id]}'),sort:!('@timestamp',desc))"
|
||||
soc_pivot: "https://{{ GLOBALS.url_base }}/#/hunt"
|
||||
sigma_level: ""
|
||||
|
||||
index: '.ds-logs-*'
|
||||
name: EQL
|
||||
priority: 3
|
||||
realert:
|
||||
minutes: 0
|
||||
type: any
|
||||
filter:
|
||||
13
salt/soctopus/files/templates/osquery.template
Normal file
@@ -0,0 +1,13 @@
|
||||
alert:
|
||||
- "modules.so.playbook-es.PlaybookESAlerter"
|
||||
|
||||
elasticsearch_host: "{{ GLOBALS.url_base }}:9200"
|
||||
play_title: ""
|
||||
event.module: "playbook"
|
||||
event.dataset: "alert"
|
||||
event.severity:
|
||||
rule.category:
|
||||
play_url: "https://{{ GLOBALS.url_base }}/playbook/issues/6000"
|
||||
kibana_pivot: "https://{{ GLOBALS.url_base }}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{[_id]}'),sort:!('@timestamp',desc))"
|
||||
soc_pivot: "https://{{ GLOBALS.url_base }}/#/hunt"
|
||||
sigma_level: ""
|
||||
13
salt/soctopus/init.sls
Normal file
@@ -0,0 +1,13 @@

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'soctopus/map.jinja' import SOCTOPUSMERGED %}

include:
{% if SOCTOPUSMERGED.enabled %}
  - soctopus.enabled
{% else %}
  - soctopus.disabled
{% endif %}
7
salt/soctopus/map.jinja
Normal file
@@ -0,0 +1,7 @@

{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{% import_yaml 'soctopus/defaults.yaml' as SOCTOPUSDEFAULTS %}
{% set SOCTOPUSMERGED = salt['pillar.get']('soctopus', SOCTOPUSDEFAULTS.soctopus, merge=True) %}
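
Note: as with the playbook module, SOCtopus stays disabled until pillar overrides the default. A minimal sketch of a local pillar override (file location and the ruleset value are hypothetical; the rulesets list is what SOCtopus.conf joins into playbook_rulesets):

    # Hypothetical local pillar override, merged over salt/soctopus/defaults.yaml by soctopus/map.jinja.
    soctopus:
      enabled: True
      playbook:
        rulesets:
          - windows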
10
salt/soctopus/soc_soctopus.yaml
Normal file
@@ -0,0 +1,10 @@

soctopus:
  enabled:
    description: You can enable or disable SOCtopus.
    helpLink: playbook.html
  playbook:
    rulesets:
      description: List of playbook rulesets.
      advanced: True
      helpLink: playbook.html
      global: True
21
salt/soctopus/sostatus.sls
Normal file
@@ -0,0 +1,21 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
append_so-soctopus_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-soctopus
|
||||
- unless: grep -q so-soctopus /opt/so/conf/so-status/so-status.conf
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
12
salt/soctopus/tools/sbin/so-soctopus-restart
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-restart soctopus $1
|
||||
12
salt/soctopus/tools/sbin/so-soctopus-start
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-start soctopus $1
|
||||
12
salt/soctopus/tools/sbin/so-soctopus-stop
Executable file
@@ -0,0 +1,12 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
/usr/sbin/so-stop soctopus $1
|
||||
@@ -50,6 +50,16 @@ backend_taste:
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
{% if STRELKAMERGED.rules.enabled %}
|
||||
strelkarules:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/strelka/rules
|
||||
- source: salt://strelka/rules
|
||||
- user: 939
|
||||
- group: 939
|
||||
- clean: True
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
|
||||
@@ -42,8 +42,8 @@ strelka_backend:
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- restart_policy: on-failure
|
||||
#- watch:
|
||||
#- file: strelkarules
|
||||
- watch:
|
||||
- file: strelkarules
|
||||
|
||||
delete_so-strelka-backend_so-status.disabled:
|
||||
file.uncomment:
|
||||
|
||||
@@ -33,12 +33,6 @@ suricata:
|
||||
threads: 1
|
||||
tpacket-v3: "yes"
|
||||
ring-size: 5000
|
||||
block-size: 32768
|
||||
block-timeout: 10
|
||||
use-emergency-flush: "yes"
|
||||
buffer-size: 32768
|
||||
disable-promisc: "no"
|
||||
checksum-checks: kernel
|
||||
vars:
|
||||
address-groups:
|
||||
HOME_NET:
|
||||
|
||||
@@ -32,21 +32,11 @@
|
||||
- interface: {{ GLOBALS.sensor.interface }}
|
||||
cluster-id: {{ SURICATAMERGED.config['af-packet']['cluster-id'] }}
|
||||
cluster-type: {{ SURICATAMERGED.config['af-packet']['cluster-type'] }}
|
||||
defrag: "{{ SURICATAMERGED.config['af-packet'].defrag }}"
|
||||
use-mmap: "{{ SURICATAMERGED.config['af-packet']['use-mmap'] }}"
|
||||
defrag: {{ SURICATAMERGED.config['af-packet'].defrag }}
|
||||
use-mmap: {{ SURICATAMERGED.config['af-packet']['use-mmap'] }}
|
||||
threads: {{ SURICATAMERGED.config['af-packet'].threads }}
|
||||
tpacket-v3: "{{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }}"
|
||||
tpacket-v3: {{ SURICATAMERGED.config['af-packet']['tpacket-v3'] }}
|
||||
ring-size: {{ SURICATAMERGED.config['af-packet']['ring-size'] }}
|
||||
block-size: {{ SURICATAMERGED.config['af-packet']['block-size'] }}
|
||||
block-timeout: {{ SURICATAMERGED.config['af-packet']['block-timeout'] }}
|
||||
use-emergency-flush: "{{ SURICATAMERGED.config['af-packet']['use-emergency-flush'] }}"
|
||||
buffer-size: {{ SURICATAMERGED.config['af-packet']['buffer-size'] }}
|
||||
disable-promisc: "{{ SURICATAMERGED.config['af-packet']['disable-promisc'] }}"
|
||||
{% if SURICATAMERGED.config['af-packet']['checksum-checks'] in ['yes', 'no'] %}
|
||||
checksum-checks: "{{ SURICATAMERGED.config['af-packet']['checksum-checks'] }}"
|
||||
{% else %}
|
||||
checksum-checks: {{ SURICATAMERGED.config['af-packet']['checksum-checks'] }}
|
||||
{% endif %}
|
||||
{% endload %}
|
||||
{% do SURICATAMERGED.config.pop('af-packet') %}
|
||||
{% do SURICATAMERGED.config.update({'af-packet': afpacket}) %}
|
||||
|
||||
@@ -21,14 +21,14 @@ suricata:
helpLink: suricata.html
pcap:
filesize:
description: Maximum file size for individual PCAP files written by Suricata. Increasing this number could improve write performance at the expense of pcap retrieval time.
description: Max file size for individual PCAP files written by Suricata. Increasing this number could improve write performance at the expense of pcap retrieval times.
advanced: True
helpLink: suricata.html
helplink: suricata.html
maxsize:
description: Maximum size in GB for total disk usage of all PCAP files written by Suricata.
helpLink: suricata.html
description: Size in GB for total usage size of PCAP on disk.
helplink: suricata.html
compression:
description: Enable compression of Suricata PCAP files.
description: Enable compression of Suricata PCAP.
advanced: True
helpLink: suricata.html
lz4-checksum:
@@ -36,11 +36,11 @@ suricata:
advanced: True
helpLink: suricata.html
lz4-level:
description: lz4 compression level of PCAP files. Set to 0 for no compression. Set to 16 for maximum compression.
description: lz4 compression level of PCAP. 0 for no compression 16 for max compression.
advanced: True
helpLink: suricata.html
filename:
description: Filename output for Suricata PCAP files.
description: Filename output for Suricata PCAP.
advanced: True
readonly: True
helpLink: suricata.html
@@ -50,13 +50,13 @@ suricata:
readonly: True
helpLink: suricata.html
use-stream-depth:
description: Set to "no" to ignore the stream depth and capture the entire flow. Set to "yes" to truncate the flow based on the stream depth.
description: Set to "no" to ignore the stream depth and capture the entire flow. Set this to "yes" to truncate the flow based on the stream depth.
advanced: True
regex: ^(yes|no)$
regexFailureMessage: You must enter either yes or no.
helpLink: suricata.html
conditional:
description: Set to "all" to record PCAP for all flows. Set to "alerts" to only record PCAP for Suricata alerts. Set to "tag" to only record PCAP for tagged rules.
description: Set to "all" to capture PCAP for all flows. Set to "alerts" to capture PCAP just for alerts or set to "tag" to capture PCAP for just tagged rules.
regex: ^(all|alerts|tag)$
regexFailureMessage: You must enter either all, alert or tag.
helpLink: suricata.html
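The pcap options described above correspond to Suricata's pcap-log output. As an illustration only (key names follow upstream suricata.yaml, the values are placeholders, and the exact Security Onion mapping is assumed rather than taken from this diff), the resulting section looks roughly like:

  outputs:
    - pcap-log:
        enabled: yes
        filename: log.pcap          # placeholder filename pattern
        limit: 1000mb               # per-file size cap (filesize)
        max-files: 2000
        compression: lz4
        lz4-checksum: no
        lz4-level: 4                # 0 = no compression ... 16 = maximum
        use-stream-depth: no        # no = capture the entire flow
        conditional: alerts         # all | alerts | tag
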
@@ -94,36 +94,6 @@ suricata:
description: Buffer size for packets per thread.
forcedType: int
helpLink: suricata.html
block-size:
description: This must be configured to a sufficiently high value to accommodate a significant number of packets, considering byte size and MTU constraints. Ensure it aligns with a power of 2 and is a multiple of the page size.
advanced: True
forcedType: int
helpLink: suricata.html
block-timeout:
description: If a block remains unfilled after the specified block-timeout milliseconds, it is passed to userspace.
advanced: True
forcedType: int
helpLink: suricata.html
use-emergency-flush:
description: In high-traffic environments, enabling this option to 'yes' aids in recovering from packet drop occurrences. However, it may lead to some packets, possibly at max ring flush, not being inspected.
advanced: True
regex: ^(yes|no)$
helpLink: suricata.html
buffer-size:
description: Increasing the value of the receive buffer may improve performance.
advanced: True
forcedType: int
helpLink: suricata.html
disable-promisc:
description: Promiscuous mode can be disabled by setting this to "yes".
advanced: True
regex: ^(yes|no)$
helpLink: suricata.html
checksum-checks:
description: "Opt for the checksum verification mode suitable for the interface. During capture, it's possible that some packets may exhibit invalid checksums due to the network card handling the checksum computation. You have several options: 'kernel': Relies on indications sent by the kernel for each packet (default). 'yes': Enforces checksum validation. 'no': Disables checksum validation. 'auto': Suricata employs a statistical approach to detect checksum offloading."
advanced: True
regex: ^(kernel|yes|no|auto)$
helpLink: suricata.html
threading:
set-cpu-affinity:
description: Bind(yes) or unbind(no) management and worker threads to a core or range of cores.

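As a quick worked example of the block-size guidance above (assumed numbers, not shipped defaults): with a 4096-byte page size, 1048576 is both a power of two (2^20) and a multiple of the page size (1048576 = 256 x 4096), and at a 1500-byte MTU one block holds on the order of several hundred packets:

  block-size: 1048576     # 2^20 bytes; 256 * 4096-byte pages
  block-timeout: 10       # ms before a partially filled block is flushed to userspace
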
12
salt/top.sls
@@ -67,6 +67,7 @@ base:
- idstools
- suricata.manager
- healthcheck
- mysql
- elasticsearch
- elastic-fleet-package-registry
- kibana
@@ -77,6 +78,8 @@ base:
- curator.disabled
- elastalert
- utility
- soctopus
- playbook
- elasticfleet

'*_manager and G@saltversion:{{saltversion}}':
@@ -96,6 +99,7 @@ base:
- backup.config_backup
- idstools
- suricata.manager
- mysql
- elasticsearch
- logstash
- redis
@@ -104,6 +108,8 @@ base:
- curator.disabled
- elastalert
- utility
- soctopus
- playbook
- elasticfleet
- stig

@@ -126,6 +132,7 @@ base:
- idstools
- suricata.manager
- healthcheck
- mysql
- elasticsearch
- logstash
- redis
@@ -138,6 +145,8 @@ base:
- curator.disabled
- elastalert
- utility
- soctopus
- playbook
- elasticfleet
- stig

@@ -170,6 +179,7 @@ base:
- backup.config_backup
- idstools
- suricata.manager
- mysql
- elasticsearch
- logstash
- redis
@@ -178,6 +188,8 @@ base:
- kibana
- elastalert
- utility
- soctopus
- playbook
- elasticfleet
- stig

@@ -792,6 +792,7 @@ create_manager_pillars() {
create_global
create_sensoroni_pillar
backup_pillar
soctopus_pillar
docker_pillar
redis_pillar
idstools_pillar
@@ -1108,6 +1109,10 @@ generate_ssl() {

generate_passwords(){
title "Generate Random Passwords"
MYSQLPASS=$(get_random_value)
PLAYBOOKDBPASS=$(get_random_value)
PLAYBOOKADMINPASS=$(get_random_value)
PLAYBOOKAUTOMATIONPASS=$(get_random_value)
INFLUXPASS=$(get_random_value)
INFLUXTOKEN=$(head -c 64 /dev/urandom | base64 --wrap=0)
SENSORONIKEY=$(get_random_value)
@@ -1162,6 +1167,11 @@ install_cleanup() {
# that will disrupt automated tests should be placed beneath this statement.
[ -n "$TESTING" ] && return

# If Mysql is running stop it
if docker ps --format "{{.Names}}" 2>&1 | grep -q "so-mysql"; then
logVmd "/usr/sbin/so-mysql-stop"
fi

if [[ $setup_type == 'iso' ]]; then
info "Removing so-setup permission entry from sudoers file"
logCmd "sed -i '/so-setup/d' /etc/sudoers"
@@ -1269,11 +1279,17 @@ telegraf_pillar() {
manager_pillar() {
touch $adv_manager_pillar_file
title "Create the manager pillar"
if [[ $is_import ]]; then
PLAYBOOK=0
else
PLAYBOOK=1
fi
printf '%s\n'\
"manager:"\
" proxy: '$so_proxy'"\
" no_proxy: '$no_proxy_string'"\
" elastalert: 1"\
" playbook: $PLAYBOOK"\
"" > "$manager_pillar_file"
}

@@ -1346,6 +1362,16 @@ backup_pillar() {
touch $adv_backup_pillar_file
}

soctopus_pillar() {
title "Create the soctopus pillar file"
touch $adv_soctopus_pillar_file
printf '%s\n'\
"soctopus:"\
" playbook:"\
" rulesets:"\
" - windows" > "$soctopus_pillar_file"
}

docker_pillar() {
title "Create the docker pillar file"
touch $adv_docker_pillar_file
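For reference, the soctopus_pillar() function added above writes a small static pillar; the same printf pattern is used by manager_pillar() and secrets_pillar(). Assuming the indentation intended by the printf arguments, the generated soc_soctopus.sls would look roughly like:

  soctopus:
    playbook:
      rulesets:
        - windows
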
@@ -1387,7 +1413,7 @@ make_some_dirs() {
mkdir -p $local_salt_dir/salt/firewall/portgroups
mkdir -p $local_salt_dir/salt/firewall/ports

for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do
for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc soctopus docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do
mkdir -p $local_salt_dir/pillar/$THEDIR
touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls
@@ -1923,6 +1949,7 @@ saltify() {

salt_install_module_deps() {
logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/"
logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/"
}

salt_patch_x509_v2() {
@@ -1940,6 +1967,11 @@ secrets_pillar(){
mkdir -p $local_salt_dir/pillar
printf '%s\n'\
"secrets:"\
" mysql: $MYSQLPASS"\
" playbook_db: $PLAYBOOKDBPASS"\
" playbook_admin: $PLAYBOOKADMINPASS"\
" playbook_automation: $PLAYBOOKAUTOMATIONPASS"\
" playbook_automation_api_key: "\
" import_pass: $IMPORTPASS"\
" influx_pass: $INFLUXPASS" > $local_salt_dir/pillar/secrets.sls
fi

@@ -775,6 +775,10 @@ if ! [[ -f $install_opt_file ]]; then
error "Failed to run so-elastic-fleet-setup"
fail_setup
fi
if [[ ! $is_import ]]; then
title "Setting up Playbook"
logCmd "so-playbook-reset"
fi
checkin_at_boot
set_initial_firewall_access
logCmd "salt-call schedule.enable -linfo --local"

@@ -112,6 +112,12 @@ export sensoroni_pillar_file
adv_sensoroni_pillar_file="$local_salt_dir/pillar/sensoroni/adv_sensoroni.sls"
export adv_sensoroni_pillar_file

soctopus_pillar_file="$local_salt_dir/pillar/soctopus/soc_soctopus.sls"
export soctopus_pillar_file

adv_soctopus_pillar_file="$local_salt_dir/pillar/soctopus/adv_soctopus.sls"
export adv_soctopus_pillar_file

docker_pillar_file="$local_salt_dir/pillar/docker/soc_docker.sls"
export docker_pillar

@@ -288,9 +288,9 @@ whiptail_dhcp_or_static() {
[ -n "$TESTING" ] && return

address_type=$(whiptail --title "$whiptail_title" --menu \
"Choose how to set up your management interface. We recommend using a static IP address." 20 78 4 \
"STATIC" "Set a static IPv4 address (recommended)" \
"DHCP" "Use DHCP to configure the management interface" 3>&1 1>&2 2>&3 )
"Choose how to set up your management interface:" 20 78 4 \
"STATIC" "Set a static IPv4 address" \
"DHCP" "Use DHCP to configure the Management Interface" 3>&1 1>&2 2>&3 )
local exitstatus=$?
whiptail_check_exitstatus $exitstatus