Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2026-01-23 08:31:30 +01:00)

Compare commits: 2.4.60-202 ... kaffytaffy (238 commits)
(Commit listing: 238 bare SHA1 hashes, from d91dd0dd3c down to 86dc7cc804; the author, date, and message columns were lost in extraction.)
.github/workflows/close-threads.yml (vendored, new file, 32 lines)
@@ -0,0 +1,32 @@
name: 'Close Threads'

on:
  schedule:
    - cron: '50 1 * * *'
  workflow_dispatch:

permissions:
  issues: write
  pull-requests: write
  discussions: write

concurrency:
  group: lock-threads

jobs:
  close-threads:
    runs-on: ubuntu-latest
    permissions:
      issues: write
      pull-requests: write
    steps:
      - uses: actions/stale@v5
        with:
          days-before-issue-stale: -1
          days-before-issue-close: 60
          stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
          close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
          days-before-pr-stale: 45
          days-before-pr-close: 60
          stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
          close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked."
.github/workflows/lock-threads.yml (vendored, 19 lines changed)
@@ -2,7 +2,7 @@ name: 'Lock Threads'

 on:
   schedule:
-    - cron: '50 1 * * *'
+    - cron: '50 2 * * *'
   workflow_dispatch:

 permissions:
@@ -14,23 +14,6 @@ concurrency:
   group: lock-threads

 jobs:
-  close-threads:
-    runs-on: ubuntu-latest
-    permissions:
-      issues: write
-      pull-requests: write
-    steps:
-      - uses: actions/stale@v5
-        with:
-          days-before-issue-stale: -1
-          days-before-issue-close: 60
-          stale-issue-message: "This issue is stale because it has been inactive for an extended period. Stale issues convey that the issue, while important to someone, is not critical enough for the author, or other community members to work on, sponsor, or otherwise shepherd the issue through to a resolution."
-          close-issue-message: "This issue was closed because it has been stale for an extended period. It will be automatically locked in 30 days, after which no further commenting will be available."
-          days-before-pr-stale: 45
-          days-before-pr-close: 60
-          stale-pr-message: "This PR is stale because it has been inactive for an extended period. The longer a PR remains stale the more out of date with the main branch it becomes."
-          close-pr-message: "This PR was closed because it has been stale for an extended period. It will be automatically locked in 30 days. If there is still a commitment to finishing this PR re-open it before it is locked."
-
   lock-threads:
     runs-on: ubuntu-latest
     steps:
@@ -19,4 +19,4 @@ role:
   receiver:
   standalone:
   searchnode:
-  sensor:
+  sensor:
pillar/kafka/nodes.sls (new file, 30 lines)
@@ -0,0 +1,30 @@
{% set current_kafkanodes = salt.saltutil.runner('mine.get', tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver', fun='network.ip_addrs', tgt_type='compound') %}
{% set pillar_kafkanodes = salt['pillar.get']('kafka:nodes', default={}, merge=True) %}

{% set existing_ids = [] %}
{% for node in pillar_kafkanodes.values() %}
{% if node.get('id') %}
{% do existing_ids.append(node['nodeid']) %}
{% endif %}
{% endfor %}
{% set all_possible_ids = range(1, 256)|list %}

{% set available_ids = [] %}
{% for id in all_possible_ids %}
{% if id not in existing_ids %}
{% do available_ids.append(id) %}
{% endif %}
{% endfor %}

{% set final_nodes = pillar_kafkanodes.copy() %}

{% for minionid, ip in current_kafkanodes.items() %}
{% set hostname = minionid.split('_')[0] %}
{% if hostname not in final_nodes %}
{% set new_id = available_ids.pop(0) %}
{% do final_nodes.update({hostname: {'nodeid': new_id, 'ip': ip[0]}}) %}
{% endif %}
{% endfor %}

kafka:
  nodes: {{ final_nodes|tojson }}
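For illustration, a hypothetical render of this pillar on a small grid (hostnames, node IDs, and IPs below are invented; final_nodes|tojson serializes the map as JSON):

    kafka:
      nodes: {"manager": {"nodeid": 1, "ip": "192.168.1.10"}, "receiver01": {"nodeid": 2, "ip": "192.168.1.20"}}

Each node keeps any nodeid it already has in the pillar; hosts seen for the first time take the lowest free ID from the 1-255 pool.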
@@ -43,8 +43,6 @@ base:
     - soc.soc_soc
     - soc.adv_soc
     - soc.license
-    - soctopus.soc_soctopus
-    - soctopus.adv_soctopus
     - kibana.soc_kibana
     - kibana.adv_kibana
     - kratos.soc_kratos
@@ -61,10 +59,11 @@ base:
     - elastalert.adv_elastalert
     - backup.soc_backup
     - backup.adv_backup
-    - soctopus.soc_soctopus
-    - soctopus.adv_soctopus
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
+    - kafka.nodes
+    - kafka.soc_kafka
+    - kafka.adv_kafka
     - stig.soc_stig

   '*_sensor':
@@ -108,8 +107,6 @@ base:
     - soc.soc_soc
     - soc.adv_soc
     - soc.license
-    - soctopus.soc_soctopus
-    - soctopus.adv_soctopus
     - kibana.soc_kibana
     - kibana.adv_kibana
     - strelka.soc_strelka
@@ -165,8 +162,6 @@ base:
     - soc.soc_soc
     - soc.adv_soc
     - soc.license
-    - soctopus.soc_soctopus
-    - soctopus.adv_soctopus
     - kibana.soc_kibana
     - kibana.adv_kibana
     - strelka.soc_strelka
@@ -184,6 +179,9 @@ base:
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
+    - kafka.nodes
+    - kafka.soc_kafka
+    - kafka.adv_kafka

   '*_heavynode':
     - elasticsearch.auth
@@ -240,6 +238,9 @@ base:
     - redis.adv_redis
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
+    - kafka.nodes
+    - kafka.soc_kafka
+    - kafka.adv_kafka

   '*_import':
     - secrets
@@ -262,8 +263,6 @@ base:
     - soc.soc_soc
     - soc.adv_soc
     - soc.license
-    - soctopus.soc_soctopus
-    - soctopus.adv_soctopus
     - kibana.soc_kibana
     - kibana.adv_kibana
     - backup.soc_backup
@@ -34,7 +34,6 @@
       'suricata',
       'utility',
       'schedule',
-      'soctopus',
       'tcpreplay',
       'docker_clean'
     ],
@@ -101,9 +100,9 @@
       'suricata.manager',
       'utility',
       'schedule',
-      'soctopus',
       'docker_clean',
-      'stig'
+      'stig',
+      'kafka'
     ],
     'so-managersearch': [
       'salt.master',
@@ -123,9 +122,9 @@
       'suricata.manager',
       'utility',
       'schedule',
-      'soctopus',
       'docker_clean',
-      'stig'
+      'stig',
+      'kafka'
     ],
     'so-searchnode': [
       'ssl',
@@ -157,10 +156,10 @@
       'healthcheck',
       'utility',
       'schedule',
-      'soctopus',
       'tcpreplay',
       'docker_clean',
-      'stig'
+      'stig',
+      'kafka'
     ],
     'so-sensor': [
       'ssl',
@@ -191,7 +190,9 @@
       'telegraf',
       'firewall',
       'schedule',
-      'docker_clean'
+      'docker_clean',
+      'kafka',
+      'elasticsearch.ca'
     ],
     'so-desktop': [
       'ssl',
@@ -200,10 +201,6 @@
     ],
 }, grain='role') %}

-{% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
-{% do allowed_states.append('mysql') %}
-{% endif %}
-
 {%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
 {% do allowed_states.append('zeek') %}
 {%- endif %}
@@ -229,10 +226,6 @@
 {% do allowed_states.append('elastalert') %}
 {% endif %}

-{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
-{% do allowed_states.append('playbook') %}
-{% endif %}
-
 {% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
 {% do allowed_states.append('logstash') %}
 {% endif %}
@@ -1,6 +1,6 @@
 bpf:
   pcap:
-    description: List of BPF filters to apply to PCAP.
+    description: List of BPF filters to apply to Stenographer.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html
@@ -70,3 +70,17 @@ x509_signing_policies:
     - authorityKeyIdentifier: keyid,issuer:always
     - days_valid: 820
     - copypath: /etc/pki/issued_certs/
+  kafka:
+    - minions: '*'
+    - signing_private_key: /etc/pki/ca.key
+    - signing_cert: /etc/pki/ca.crt
+    - C: US
+    - ST: Utah
+    - L: Salt Lake City
+    - basicConstraints: "critical CA:false"
+    - keyUsage: "digitalSignature, keyEncipherment"
+    - subjectKeyIdentifier: hash
+    - authorityKeyIdentifier: keyid,issuer:always
+    - extendedKeyUsage: "serverAuth, clientAuth"
+    - days_valid: 820
+    - copypath: /etc/pki/issued_certs/
@@ -1,9 +1,11 @@
-{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
-{% if SOC_GLOBAL.global.airgap %}
-{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
-{% else %}
-{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
-{% endif %}
+{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
+
+{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
+{% if SOC_GLOBAL.global.airgap %}
+{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
+{% else %}
+{% set UPDATE_DIR='/tmp/sogh/securityonion' %}
+{% endif %}

 remove_common_soup:
   file.absent:
@@ -68,3 +70,19 @@ copy_so-firewall_sbin:
     - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-firewall
     - force: True
     - preserve: True
+
+copy_so-yaml_sbin:
+  file.copy:
+    - name: /usr/sbin/so-yaml.py
+    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-yaml.py
+    - force: True
+    - preserve: True
+
+{% else %}
+fix_23_soup_sbin:
+  cmd.run:
+    - name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
+fix_23_soup_salt:
+  cmd.run:
+    - name: curl -s -f -o /opt/so/saltstack/defalt/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
+{% endif %}
@@ -248,6 +248,14 @@ get_random_value() {
   head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
 }

+get_agent_count() {
+  if [ -f /opt/so/log/agents/agentstatus.log ]; then
+    AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
+  else
+    AGENTCOUNT=0
+  fi
+}
+
 gpg_rpm_import() {
   if [[ $is_oracle ]]; then
     if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
@@ -329,7 +337,7 @@ lookup_salt_value() {
     local=""
   fi

-  salt-call --no-color ${kind}.get ${group}${key} --out=${output} ${local}
+  salt-call -lerror --no-color ${kind}.get ${group}${key} --out=${output} ${local}
 }

 lookup_pillar() {
@@ -570,8 +578,9 @@ sync_options() {
   set_version
   set_os
   salt_minion_count
+  get_agent_count

-  echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT/$(read_feat)"
+  echo "$VERSION/$OS/$(uname -r)/$MINIONCOUNT:$AGENTCOUNT/$(read_feat)"
 }

 systemctl_func() {
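For reference, a sketch of the line emitted by sync_options() before and after this change (version, OS, kernel, and counts are invented values):

    # before: 2.4.60/oel9/5.14.0-362.el9.x86_64/12/...
    # after:  2.4.60/oel9/5.14.0-362.el9.x86_64/12:42/...
    # the minion count is now suffixed with ':' plus the agent count from get_agent_count()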
@@ -50,16 +50,14 @@ container_list() {
       "so-idh"
       "so-idstools"
       "so-influxdb"
+      "so-kafka"
       "so-kibana"
       "so-kratos"
       "so-logstash"
-      "so-mysql"
       "so-nginx"
       "so-pcaptools"
-      "so-playbook"
       "so-redis"
       "so-soc"
-      "so-soctopus"
       "so-steno"
       "so-strelka-backend"
       "so-strelka-filestream"
@@ -67,7 +65,7 @@ container_list() {
       "so-strelka-manager"
       "so-suricata"
       "so-telegraf"
-      "so-zeek"
+      "so-zeek"
     )
   else
     TRUSTED_CONTAINERS=(
@@ -49,10 +49,6 @@ if [ "$CONTINUE" == "y" ]; then
     sed -i "s|$OLD_IP|$NEW_IP|g" $file
   done

-  echo "Granting MySQL root user permissions on $NEW_IP"
-  docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
-  echo "Removing MySQL root user from $OLD_IP"
-  docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null
   echo "Updating Kibana dashboards"
   salt-call state.apply kibana.so_savedobjects_defaults -l info queue=True
@@ -122,6 +122,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to get license information" # Logstash trying to contact ES before it's ready
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
 fi

 if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -154,15 +155,11 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
@@ -201,6 +198,8 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log"
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unknown column" # Elastalert errors from running EQL queries
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parsing_exception" # Elastalert EQL parsing issue. Temp.
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded"
 fi
@@ -210,7 +209,9 @@ RESULT=0
 CONTAINER_IDS=$(docker ps -q)
 exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary
 exclude_container so-idstools # ignore due to known issues and noisy logging
-exclude_container so-playbook # ignore due to several playbook known issues
+exclude_container so-playbook # Playbook is removed as of 2.4.70, disregard output in stopped containers
+exclude_container so-mysql # MySQL is removed as of 2.4.70, disregard output in stopped containers
+exclude_container so-soctopus # Soctopus is removed as of 2.4.70, disregard output in stopped containers

 for container_id in $CONTAINER_IDS; do
   container_name=$(docker ps --format json | jq ". | select(.ID==\"$container_id\")|.Names")
@@ -228,10 +229,13 @@ exclude_log "kibana.log" # kibana error logs are too verbose with large varieties of errors most of which are temporary
 exclude_log "spool" # disregard zeek analyze logs as this is data specific
 exclude_log "import" # disregard imported test data the contains error strings
-exclude_log "update.log" # ignore playbook updates due to several known issues
-exclude_log "playbook.log" # ignore due to several playbook known issues
 exclude_log "cron-cluster-delete.log" # ignore since Curator has been removed
 exclude_log "cron-close.log" # ignore since Curator has been removed
 exclude_log "curator.log" # ignore since Curator has been removed
+exclude_log "playbook.log" # Playbook is removed as of 2.4.70, logs may still be on disk
+exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on disk
+exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
 exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state

 for log_file in $(cat /tmp/log_check_files); do
   status "Checking log file $log_file"
@@ -67,13 +67,6 @@ docker:
       custom_bind_mounts: []
       extra_hosts: []
       extra_env: []
-    'so-mysql':
-      final_octet: 30
-      port_bindings:
-        - 0.0.0.0:3306:3306
-      custom_bind_mounts: []
-      extra_hosts: []
-      extra_env: []
     'so-nginx':
       final_octet: 31
       port_bindings:
@@ -91,13 +84,6 @@ docker:
       custom_bind_mounts: []
       extra_hosts: []
       extra_env: []
-    'so-playbook':
-      final_octet: 32
-      port_bindings:
-        - 0.0.0.0:3000:3000
-      custom_bind_mounts: []
-      extra_hosts: []
-      extra_env: []
     'so-redis':
       final_octet: 33
       port_bindings:
@@ -118,13 +104,6 @@ docker:
       custom_bind_mounts: []
       extra_hosts: []
      extra_env: []
-    'so-soctopus':
-      final_octet: 35
-      port_bindings:
-        - 0.0.0.0:7000:7000
-      custom_bind_mounts: []
-      extra_hosts: []
-      extra_env: []
     'so-strelka-backend':
       final_octet: 36
       custom_bind_mounts: []
@@ -206,3 +185,11 @@ docker:
       custom_bind_mounts: []
       extra_hosts: []
       extra_env: []
+    'so-kafka':
+      final_octet: 88
+      port_bindings:
+        - 0.0.0.0:9092:9092
+        - 0.0.0.0:9093:9093
+      custom_bind_mounts: []
+      extra_hosts: []
+      extra_env: []
@@ -46,14 +46,11 @@ docker:
   so-kibana: *dockerOptions
   so-kratos: *dockerOptions
   so-logstash: *dockerOptions
-  so-mysql: *dockerOptions
   so-nginx: *dockerOptions
   so-nginx-fleet-node: *dockerOptions
-  so-playbook: *dockerOptions
   so-redis: *dockerOptions
   so-sensoroni: *dockerOptions
   so-soc: *dockerOptions
-  so-soctopus: *dockerOptions
   so-strelka-backend: *dockerOptions
   so-strelka-filestream: *dockerOptions
   so-strelka-frontend: *dockerOptions
@@ -68,3 +65,4 @@ docker:
   so-steno: *dockerOptions
   so-suricata: *dockerOptions
   so-zeek: *dockerOptions
+  so-kafka: *dockerOptions
@@ -0,0 +1,29 @@
{
  "package": {
    "name": "winlog",
    "version": ""
  },
  "name": "windows-defender",
  "namespace": "default",
  "description": "Windows Defender - Operational logs",
  "policy_id": "endpoints-initial",
  "inputs": {
    "winlogs-winlog": {
      "enabled": true,
      "streams": {
        "winlog.winlog": {
          "enabled": true,
          "vars": {
            "channel": "Microsoft-Windows-Windows Defender/Operational",
            "data_stream.dataset": "winlog.winlog",
            "preserve_original_event": false,
            "providers": [],
            "ignore_older": "72h",
            "language": 0,
            "tags": [] }
        }
      }
    }
  },
  "force": true
}
@@ -16,6 +16,9 @@
           "paths": [
             "/var/log/auth.log*",
             "/var/log/secure*"
           ],
+          "tags": [
+            "so-grid-node"
+          ]
         }
       },
@@ -25,6 +28,9 @@
           "paths": [
             "/var/log/messages*",
             "/var/log/syslog*"
           ],
+          "tags": [
+            "so-grid-node"
+          ]
         }
       }

@@ -16,6 +16,9 @@
           "paths": [
             "/var/log/auth.log*",
             "/var/log/secure*"
           ],
+          "tags": [
+            "so-grid-node"
+          ]
         }
       },
@@ -25,6 +28,9 @@
           "paths": [
             "/var/log/messages*",
             "/var/log/syslog*"
           ],
+          "tags": [
+            "so-grid-node"
+          ]
         }
       }
@@ -4,7 +4,7 @@
 # Elastic License 2.0.

 {% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
+{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}

 # Move our new CA over so Elastic and Logstash can use SSL with the internal CA
@@ -227,6 +227,113 @@ elasticsearch:
         sort:
           field: '@timestamp'
           order: desc
+    so-logs-soc:
+      close: 30
+      delete: 365
+      index_sorting: false
+      index_template:
+        composed_of:
+          - agent-mappings
+          - dtc-agent-mappings
+          - base-mappings
+          - dtc-base-mappings
+          - client-mappings
+          - dtc-client-mappings
+          - container-mappings
+          - destination-mappings
+          - dtc-destination-mappings
+          - pb-override-destination-mappings
+          - dll-mappings
+          - dns-mappings
+          - dtc-dns-mappings
+          - ecs-mappings
+          - dtc-ecs-mappings
+          - error-mappings
+          - event-mappings
+          - dtc-event-mappings
+          - file-mappings
+          - dtc-file-mappings
+          - group-mappings
+          - host-mappings
+          - dtc-host-mappings
+          - http-mappings
+          - dtc-http-mappings
+          - log-mappings
+          - network-mappings
+          - dtc-network-mappings
+          - observer-mappings
+          - dtc-observer-mappings
+          - organization-mappings
+          - package-mappings
+          - process-mappings
+          - dtc-process-mappings
+          - related-mappings
+          - rule-mappings
+          - dtc-rule-mappings
+          - server-mappings
+          - service-mappings
+          - dtc-service-mappings
+          - source-mappings
+          - dtc-source-mappings
+          - pb-override-source-mappings
+          - threat-mappings
+          - tls-mappings
+          - url-mappings
+          - user_agent-mappings
+          - dtc-user_agent-mappings
+          - common-settings
+          - common-dynamic-mappings
+        data_stream: {}
+        index_patterns:
+          - logs-soc-so*
+        priority: 500
+        template:
+          mappings:
+            date_detection: false
+            dynamic_templates:
+              - strings_as_keyword:
+                  mapping:
+                    ignore_above: 1024
+                    type: keyword
+                  match_mapping_type: string
+          settings:
+            index:
+              lifecycle:
+                name: so-soc-logs
+              mapping:
+                total_fields:
+                  limit: 5000
+              number_of_replicas: 0
+              number_of_shards: 1
+              refresh_interval: 30s
+              sort:
+                field: '@timestamp'
+                order: desc
+      policy:
+        phases:
+          cold:
+            actions:
+              set_priority:
+                priority: 0
+            min_age: 30d
+          delete:
+            actions:
+              delete: {}
+            min_age: 365d
+          hot:
+            actions:
+              rollover:
+                max_age: 30d
+                max_primary_shard_size: 50gb
+              set_priority:
+                priority: 100
+            min_age: 0ms
+          warm:
+            actions:
+              set_priority:
+                priority: 50
+            min_age: 30d
+      warm: 7
     so-common:
       close: 30
       delete: 365
@@ -2295,6 +2402,50 @@ elasticsearch:
             set_priority:
               priority: 50
           min_age: 30d
+    so-logs-cef_x_log:
+      index_sorting: False
+      index_template:
+        index_patterns:
+          - "logs-cef.log-*"
+        template:
+          settings:
+            index:
+              lifecycle:
+                name: so-logs-cef.log-logs
+              number_of_replicas: 0
+        composed_of:
+          - "logs-cef.log@package"
+          - "logs-cef.log@custom"
+          - "so-fleet_globals-1"
+          - "so-fleet_agent_id_verification-1"
+        priority: 501
+        data_stream:
+          hidden: false
+          allow_custom_routing: false
+      policy:
+        phases:
+          cold:
+            actions:
+              set_priority:
+                priority: 0
+            min_age: 30d
+          delete:
+            actions:
+              delete: {}
+            min_age: 365d
+          hot:
+            actions:
+              rollover:
+                max_age: 30d
+                max_primary_shard_size: 50gb
+              set_priority:
+                priority: 100
+            min_age: 0ms
+          warm:
+            actions:
+              set_priority:
+                priority: 50
+            min_age: 30d
     so-logs-checkpoint_x_firewall:
       index_sorting: False
       index_template:
@@ -57,10 +57,11 @@
 { "convert": { "field": "log.id.uid", "type": "string", "ignore_failure": true, "ignore_missing": true } },
 { "convert": { "field": "agent.id", "type": "string", "ignore_failure": true, "ignore_missing": true } },
 { "convert": { "field": "event.severity", "type": "integer", "ignore_failure": true, "ignore_missing": true } },
-{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" }},
+{ "set": { "field": "event.dataset", "ignore_empty_value":true, "copy_from": "event.dataset_temp" } },
 { "set": { "if": "ctx.event?.dataset != null && !ctx.event.dataset.contains('.')", "field": "event.dataset", "value": "{{event.module}}.{{event.dataset}}" } },
-{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
-{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" }},
+{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
+{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
+{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
 { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
 {%- endraw %}
 {%- if HIGHLANDER %}
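As a hypothetical walk-through of the dataset handling above (field values invented): an event with event.dataset = "suricata.alert" is split on the dot into dataset_tag_temp = ["suricata", "alert"], so "alert" is appended to tags; the new grok processor coerces a status code like "301 Moved Permanently" down to the long 301:

    { "event": { "dataset": "suricata.alert" } }                           ->  "tags": [ ..., "alert" ]
    { "http": { "response": { "status_code": "301 Moved Permanently" } } } ->  "http": { "response": { "status_code": 301 } }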
@@ -68,7 +68,7 @@
       "field": "_security",
       "ignore_missing": true
     }
-  },
+  },
 { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
 { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
 { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
@@ -83,6 +83,7 @@
 { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
 { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
 { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
+{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
 { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
 ],
 "on_failure": [
salt/elasticsearch/files/ingest/logs-pfsense.log-1.16.0 (new file, 389 lines)
@@ -0,0 +1,389 @@
{
  "description": "Pipeline for pfSense",
  "processors": [
    {
      "set": {
        "field": "ecs.version",
        "value": "8.10.0"
      }
    },
    {
      "set": {
        "field": "observer.vendor",
        "value": "netgate"
      }
    },
    {
      "set": {
        "field": "observer.type",
        "value": "firewall"
      }
    },
    {
      "rename": {
        "field": "message",
        "target_field": "event.original"
      }
    },
    {
      "set": {
        "field": "event.kind",
        "value": "event"
      }
    },
    {
      "set": {
        "field": "event.timezone",
        "value": "{{_tmp.tz_offset}}",
        "if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
      }
    },
    {
      "grok": {
        "description": "Parse syslog header",
        "field": "event.original",
        "patterns": [
          "^(%{ECS_SYSLOG_PRI})?%{TIMESTAMP} %{GREEDYDATA:message}"
        ],
        "pattern_definitions": {
          "ECS_SYSLOG_PRI": "<%{NONNEGINT:log.syslog.priority:long}>(\\d )?",
          "TIMESTAMP": "(?:%{BSD_TIMESTAMP_FORMAT}|%{SYSLOG_TIMESTAMP_FORMAT})",
          "BSD_TIMESTAMP_FORMAT": "%{SYSLOGTIMESTAMP:_tmp.timestamp}(%{SPACE}%{BSD_PROCNAME}|%{SPACE}%{OBSERVER}%{SPACE}%{BSD_PROCNAME})(\\[%{POSINT:process.pid:long}\\])?:",
          "BSD_PROCNAME": "(?:\\b%{NAME:process.name}|\\(%{NAME:process.name}\\))",
          "NAME": "[[[:alnum:]]_-]+",
          "SYSLOG_TIMESTAMP_FORMAT": "%{TIMESTAMP_ISO8601:_tmp.timestamp8601}%{SPACE}%{OBSERVER}%{SPACE}%{PROCESS}%{SPACE}(%{POSINT:process.pid:long}|-) - (-|%{META})",
          "TIMESTAMP_ISO8601": "%{YEAR}-%{MONTHNUM}-%{MONTHDAY}[T ]%{HOUR}:?%{MINUTE}(?::?%{SECOND})?%{ISO8601_TIMEZONE:event.timezone}?",
          "OBSERVER": "(?:%{IP:observer.ip}|%{HOSTNAME:observer.name})",
          "PROCESS": "(\\(%{DATA:process.name}\\)|(?:%{UNIXPATH}*/)?%{BASEPATH:process.name})",
          "BASEPATH": "[[[:alnum:]]_%!$@:.,+~-]+",
          "META": "\\[[^\\]]*\\]"
        }
      }
    },
    {
      "date": {
        "if": "ctx._tmp.timestamp8601 != null",
        "field": "_tmp.timestamp8601",
        "target_field": "@timestamp",
        "formats": [
          "ISO8601"
        ]
      }
    },
    {
      "date": {
        "if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
        "field": "_tmp.timestamp",
        "target_field": "@timestamp",
        "formats": [
          "MMM d HH:mm:ss",
          "MMM  d HH:mm:ss",
          "MMM dd HH:mm:ss"
        ],
        "timezone": "{{ event.timezone }}"
      }
    },
    {
      "grok": {
        "description": "Set Event Provider",
        "field": "process.name",
        "patterns": [
          "^%{HYPHENATED_WORDS:event.provider}"
        ],
        "pattern_definitions": {
          "HYPHENATED_WORDS": "\\b[A-Za-z0-9_]+(-[A-Za-z_]+)*\\b"
        }
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-firewall",
        "if": "ctx.event.provider == 'filterlog'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-openvpn",
        "if": "ctx.event.provider == 'openvpn'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-ipsec",
        "if": "ctx.event.provider == 'charon'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-dhcp",
        "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-unbound",
        "if": "ctx.event.provider == 'unbound'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-haproxy",
        "if": "ctx.event.provider == 'haproxy'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-php-fpm",
        "if": "ctx.event.provider == 'php-fpm'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-squid",
        "if": "ctx.event.provider == 'squid'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.16.0-suricata",
        "if": "ctx.event.provider == 'suricata'"
      }
    },
    {
      "drop": {
        "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"suricata\"].contains(ctx.event?.provider)"
      }
    },
    {
      "append": {
        "field": "event.category",
        "value": "network",
        "if": "ctx.network != null"
      }
    },
    {
      "convert": {
        "field": "source.address",
        "target_field": "source.ip",
        "type": "ip",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "convert": {
        "field": "destination.address",
        "target_field": "destination.ip",
        "type": "ip",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "set": {
        "field": "network.type",
        "value": "ipv6",
        "if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
      }
    },
    {
      "set": {
        "field": "network.type",
        "value": "ipv4",
        "if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
      }
    },
    {
      "geoip": {
        "field": "source.ip",
        "target_field": "source.geo",
        "ignore_missing": true
      }
    },
    {
      "geoip": {
        "field": "destination.ip",
        "target_field": "destination.geo",
        "ignore_missing": true
      }
    },
    {
      "geoip": {
        "ignore_missing": true,
        "database_file": "GeoLite2-ASN.mmdb",
        "field": "source.ip",
        "target_field": "source.as",
        "properties": [
          "asn",
          "organization_name"
        ]
      }
    },
    {
      "geoip": {
        "database_file": "GeoLite2-ASN.mmdb",
        "field": "destination.ip",
        "target_field": "destination.as",
        "properties": [
          "asn",
          "organization_name"
        ],
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "source.as.asn",
        "target_field": "source.as.number",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "source.as.organization_name",
        "target_field": "source.as.organization.name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "destination.as.asn",
        "target_field": "destination.as.number",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "destination.as.organization_name",
        "target_field": "destination.as.organization.name",
        "ignore_missing": true
      }
    },
    {
      "community_id": {
        "target_field": "network.community_id",
        "ignore_failure": true
      }
    },
    {
      "grok": {
        "field": "observer.ingress.interface.name",
        "patterns": [
          "%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
        ],
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "set": {
        "field": "network.vlan.id",
        "copy_from": "observer.ingress.vlan.id",
        "ignore_empty_value": true
      }
    },
    {
      "append": {
        "field": "related.ip",
        "value": "{{destination.ip}}",
        "allow_duplicates": false,
        "if": "ctx.destination?.ip != null"
      }
    },
    {
      "append": {
        "field": "related.ip",
        "value": "{{source.ip}}",
        "allow_duplicates": false,
        "if": "ctx.source?.ip != null"
      }
    },
    {
      "append": {
        "field": "related.ip",
        "value": "{{source.nat.ip}}",
        "allow_duplicates": false,
        "if": "ctx.source?.nat?.ip != null"
      }
    },
    {
      "append": {
        "field": "related.hosts",
        "value": "{{destination.domain}}",
        "if": "ctx.destination?.domain != null"
      }
    },
    {
      "append": {
        "field": "related.user",
        "value": "{{user.name}}",
        "if": "ctx.user?.name != null"
      }
    },
    {
      "set": {
        "field": "network.direction",
        "value": "{{network.direction}}bound",
        "if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
      }
    },
    {
      "remove": {
        "field": [
          "_tmp"
        ],
        "ignore_failure": true
      }
    },
    {
      "script": {
        "lang": "painless",
        "description": "This script processor iterates over the whole document to remove fields with null values.",
        "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
      }
    },
    {
      "remove": {
        "field": "event.original",
        "if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log@custom",
        "ignore_missing_pipeline": true
      }
    }
  ],
  "on_failure": [
    {
      "remove": {
        "field": [
          "_tmp"
        ],
        "ignore_failure": true
      }
    },
    {
      "set": {
        "field": "event.kind",
        "value": "pipeline_error"
      }
    },
    {
      "append": {
        "field": "error.message",
        "value": "{{{ _ingest.on_failure_message }}}"
      }
    }
  ],
  "_meta": {
    "managed_by": "fleet",
    "managed": true,
    "package": {
      "name": "pfsense"
    }
  }
}
@@ -0,0 +1,31 @@
{
  "description": "Pipeline for parsing pfSense Suricata logs.",
  "processors": [
    {
      "pipeline": {
        "name": "suricata.common"
      }
    }
  ],
  "on_failure": [
    {
      "set": {
        "field": "event.kind",
        "value": "pipeline_error"
      }
    },
    {
      "append": {
        "field": "error.message",
        "value": "{{{ _ingest.on_failure_message }}}"
      }
    }
  ],
  "_meta": {
    "managed_by": "fleet",
    "managed": true,
    "package": {
      "name": "pfsense"
    }
  }
}
@@ -13,7 +13,6 @@
 { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
 { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
 { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
-{ "lowercase": { "field": "network.transport", "ignore_failure": true } },
 { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
 { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
 { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
@@ -366,6 +366,7 @@ elasticsearch:
   so-logs-azure_x_signinlogs: *indexSettings
   so-logs-azure_x_springcloudlogs: *indexSettings
   so-logs-barracuda_x_waf: *indexSettings
+  so-logs-cef_x_log: *indexSettings
   so-logs-cisco_asa_x_log: *indexSettings
   so-logs-cisco_ftd_x_log: *indexSettings
   so-logs-cisco_ios_x_log: *indexSettings
@@ -30,7 +30,8 @@
       "type": "keyword"
     },
     "author": {
-      "type": "text"
+      "ignore_above": 1024,
+      "type": "keyword"
     },
     "description": {
       "type": "text"
@@ -9,11 +9,9 @@
   'so-influxdb',
   'so-kibana',
   'so-kratos',
-  'so-mysql',
   'so-nginx',
   'so-redis',
   'so-soc',
-  'so-soctopus',
   'so-strelka-coordinator',
   'so-strelka-gatekeeper',
   'so-strelka-frontend',
@@ -29,14 +27,13 @@
   'so-elastic-fleet',
   'so-elastic-fleet-package-registry',
   'so-influxdb',
+  'so-kafka',
   'so-kibana',
   'so-kratos',
   'so-logstash',
-  'so-mysql',
   'so-nginx',
   'so-redis',
   'so-soc',
-  'so-soctopus',
   'so-strelka-coordinator',
   'so-strelka-gatekeeper',
   'so-strelka-frontend',
@@ -84,6 +81,7 @@
 {% set NODE_CONTAINERS = [
   'so-logstash',
   'so-redis',
+  'so-kafka'
 ] %}

 {% elif GLOBALS.role == 'so-idh' %}
@@ -90,6 +90,11 @@ firewall:
     tcp:
       - 8086
     udp: []
+  kafka:
+    tcp:
+      - 9092
+      - 9093
+    udp: []
   kibana:
     tcp:
       - 5601
@@ -98,19 +103,11 @@ firewall:
     tcp:
       - 7788
     udp: []
-  mysql:
-    tcp:
-      - 3306
-    udp: []
   nginx:
     tcp:
       - 80
       - 443
     udp: []
-  playbook:
-    tcp:
-      - 3000
-    udp: []
   redis:
     tcp:
       - 6379
@@ -178,8 +175,6 @@ firewall:
   hostgroups:
     eval:
       portgroups:
-        - playbook
-        - mysql
        - kibana
        - redis
        - influxdb
@@ -363,8 +358,6 @@ firewall:
   hostgroups:
     manager:
       portgroups:
-        - playbook
-        - mysql
        - kibana
        - redis
        - influxdb
@@ -376,6 +369,7 @@ firewall:
        - elastic_agent_update
        - localrules
        - sensoroni
+        - kafka
     fleet:
       portgroups:
        - elasticsearch_rest
@@ -411,6 +405,7 @@ firewall:
        - docker_registry
        - influxdb
        - sensoroni
+        - kafka
     searchnode:
       portgroups:
        - redis
@@ -424,6 +419,7 @@ firewall:
        - elastic_agent_data
        - elastic_agent_update
        - sensoroni
+        - kafka
     heavynode:
       portgroups:
        - redis
@@ -559,8 +555,6 @@ firewall:
   hostgroups:
     managersearch:
       portgroups:
-        - playbook
-        - mysql
        - kibana
        - redis
        - influxdb
@@ -756,8 +750,6 @@ firewall:
        - all
     standalone:
       portgroups:
-        - playbook
-        - mysql
        - kibana
        - redis
        - influxdb
@@ -1291,14 +1283,17 @@ firewall:
        - beats_5044
        - beats_5644
        - elastic_agent_data
+        - kafka
     searchnode:
       portgroups:
        - redis
        - beats_5644
+        - kafka
     managersearch:
       portgroups:
        - redis
        - beats_5644
+        - kafka
     self:
       portgroups:
        - redis
@@ -115,21 +115,18 @@ firewall:
   influxdb:
     tcp: *tcpsettings
     udp: *udpsettings
+  kafka:
+    tcp: *tcpsettings
+    udp: *udpsettings
   kibana:
     tcp: *tcpsettings
     udp: *udpsettings
   localrules:
     tcp: *tcpsettings
     udp: *udpsettings
-  mysql:
-    tcp: *tcpsettings
-    udp: *udpsettings
   nginx:
     tcp: *tcpsettings
     udp: *udpsettings
-  playbook:
-    tcp: *tcpsettings
-    udp: *udpsettings
   redis:
     tcp: *tcpsettings
     udp: *udpsettings
@@ -938,7 +935,6 @@ firewall:
     portgroups: *portgroupshost
   customhostgroup9:
     portgroups: *portgroupshost
-
 idh:
   chain:
     DOCKER-USER:
@@ -1,2 +1,3 @@
 global:
-  pcapengine: STENO
+  pcapengine: STENO
+  pipeline: REDIS
@@ -28,7 +28,7 @@ global:
     description: Used for handling of authentication cookies.
     global: True
   airgap:
-    description: Sets airgap mode.
+    description: Airgapped systems do not have network connectivity to the internet. This setting represents how this grid was configured during initial setup. While it is technically possible to manually switch systems between airgap and non-airgap, there are some nuances and additional steps involved. For that reason this setting is marked read-only. Contact your support representative for guidance if there is a need to change this setting.
     global: True
     readonly: True
   imagerepo:
@@ -36,9 +36,10 @@ global:
     global: True
     advanced: True
   pipeline:
-    description: Sets which pipeline technology for events to use. Currently only Redis is supported.
+    description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license.
+    regex: ^(REDIS|KAFKA)$
+    regexFailureMessage: You must enter either REDIS or KAFKA.
     global: True
     readonly: True
     advanced: True
   repo_host:
     description: Specify the host where operating system packages will be served from.
salt/kafka/config.sls (new file, 106 lines)
@@ -0,0 +1,106 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

{% set kafka_ips_logstash = [] %}
{% set kafka_ips_kraft = [] %}
{% set kafkanodes = salt['pillar.get']('kafka:nodes', {}) %}
{% set kafka_ip = GLOBALS.node_ip %}

{# Create list for kafka <-> logstash/searchnode communcations #}
{% for node, node_data in kafkanodes.items() %}
{% do kafka_ips_logstash.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% set kafka_server_list = "','".join(kafka_ips_logstash) %}

{# Create a list for kraft controller <-> kraft controller communications. Used for Kafka metadata management #}
{% for node, node_data in kafkanodes.items() %}
{% do kafka_ips_kraft.append(node_data['nodeid'] ~ "@" ~ node_data['ip'] ~ ":9093") %}
{% endfor %}
{% set kraft_server_list = "','".join(kafka_ips_kraft) %}

include:
  - ssl

kafka_group:
  group.present:
    - name: kafka
    - gid: 960

kafka:
  user.present:
    - uid: 960
    - gid: 960

{# Future tools to query kafka directly / show consumer groups
kafka_sbin_tools:
  file.recurse:
    - name: /usr/sbin
    - source: salt://kafka/tools/sbin
    - user: 960
    - group: 960
    - file_mode: 755 #}

kafka_sbin_jinja_tools:
  file.recurse:
    - name: /usr/sbin
    - source: salt://kafka/tools/sbin_jinja
    - user: 960
    - group: 960
    - file_mode: 755
    - template: jinja
    - defaults:
        GLOBALS: {{ GLOBALS }}

kakfa_log_dir:
  file.directory:
    - name: /opt/so/log/kafka
    - user: 960
    - group: 960
    - makedirs: True

kafka_data_dir:
  file.directory:
    - name: /nsm/kafka/data
    - user: 960
    - group: 960
    - makedirs: True

kafka_generate_keystore:
  cmd.run:
    - name: "/usr/sbin/so-kafka-generate-keystore"
    - onchanges:
      - x509: /etc/pki/kafka.key

kafka_keystore_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka.jks
    - mode: 640
    - user: 960
    - group: 939

{% for sc in ['server', 'client'] %}
kafka_kraft_{{sc}}_properties:
  file.managed:
    - source: salt://kafka/etc/{{sc}}.properties.jinja
    - name: /opt/so/conf/kafka/{{sc}}.properties
    - template: jinja
    - user: 960
    - group: 960
    - makedirs: True
    - show_changes: False
{% endfor %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
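To make the list building concrete, a hypothetical render for a pillar with two nodes, {'m1': {'nodeid': 1, 'ip': '10.66.0.10'}, 'r1': {'nodeid': 2, 'ip': '10.66.0.20'}} (names, IDs, and IPs invented):

    kafka_server_list = "10.66.0.10:9092','10.66.0.20:9092"
    kraft_server_list = "1@10.66.0.10:9093','2@10.66.0.20:9093"

The unusual "','" join string suggests the properties templates (not shown in this diff) wrap the whole list in one more pair of quotes to yield a quoted, comma-separated value.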
salt/kafka/defaults.yaml (new file, 39 lines)
@@ -0,0 +1,39 @@
kafka:
  enabled: False
  config:
    server:
      advertised_x_listeners:
      auto_x_create_x_topics_x_enable: true
      controller_x_listener_x_names: CONTROLLER
      controller_x_quorum_x_voters:
      inter_x_broker_x_listener_x_name: BROKER
      listeners: BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093
      listener_x_security_x_protocol_x_map: CONTROLLER:SSL,BROKER:SSL
      log_x_dirs: /nsm/kafka/data
      log_x_retention_x_check_x_interval_x_ms: 300000
      log_x_retention_x_hours: 168
      log_x_segment_x_bytes: 1073741824
      node_x_id:
      num_x_io_x_threads: 8
      num_x_network_x_threads: 3
      num_x_partitions: 1
      num_x_recovery_x_threads_x_per_x_data_x_dir: 1
      offsets_x_topic_x_replication_x_factor: 1
      process_x_roles: broker
      socket_x_receive_x_buffer_x_bytes: 102400
      socket_x_request_x_max_x_bytes: 104857600
      socket_x_send_x_buffer_x_bytes: 102400
      ssl_x_keystore_x_location: /etc/pki/kafka.jks
      ssl_x_keystore_x_password: changeit
      ssl_x_keystore_x_type: JKS
      ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
      ssl_x_truststore_x_password: changeit
      transaction_x_state_x_log_x_min_x_isr: 1
      transaction_x_state_x_log_x_replication_x_factor: 1
    client:
      security_x_protocol: SSL
      ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
      ssl_x_truststore_x_password: changeit
      ssl_x_keystore_x_location: /etc/pki/kafka.jks
      ssl_x_keystore_x_type: JKS
      ssl_x_keystore_x_password: changeit
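The _x_ markers in these keys stand in for literal dots, which would otherwise be awkward in pillar key paths; the client.properties template at the end of this diff renders the merged config through replace("_x_", "."). A sketch of a few server-side keys after that substitution:

    process.roles=broker
    log.dirs=/nsm/kafka/data
    listeners=BROKER://0.0.0.0:9092,CONTROLLER://0.0.0.0:9093
    ssl.keystore.location=/etc/pki/kafka.jks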
@@ -1,14 +1,16 @@
-
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{% from 'mysql/map.jinja' import MYSQLMERGED %}
-
 include:
-{% if MYSQLMERGED.enabled %}
-  - mysql.enabled
-{% else %}
-  - mysql.disabled
-{% endif %}
+  - kafka.sostatus
+
+so-kafka:
+  docker_container.absent:
+    - force: True
+
+so-kafka_so-status.disabled:
+  file.comment:
+    - name: /opt/so/conf/so-status/so-status.conf
+    - regex: ^so-kafka$
64
salt/kafka/enabled.sls
Normal file
@@ -0,0 +1,64 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set KAFKANODES = salt['pillar.get']('kafka:nodes', {}) %}

include:
  - elasticsearch.ca
  - kafka.sostatus
  - kafka.config
  - kafka.storage

so-kafka:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }}
    - hostname: so-kafka
    - name: so-kafka
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
    - user: kafka
    - environment:
      - KAFKA_HEAP_OPTS=-Xmx2G -Xms1G
    - extra_hosts:
{% for node in KAFKANODES %}
      - {{ node }}:{{ KAFKANODES[node].ip }}
{% endfor %}
{% if DOCKER.containers['so-kafka'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %}
      - {{ XTRAHOST }}
{% endfor %}
{% endif %}
    - port_bindings:
{% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
      - {{ BINDING }}
{% endfor %}
    - binds:
      - /etc/pki/kafka.jks:/etc/pki/kafka.jks
      - /opt/so/conf/ca/cacerts:/etc/pki/java/sos/cacerts
      - /nsm/kafka/data/:/nsm/kafka/data/:rw
      - /opt/so/conf/kafka/server.properties:/kafka/config/kraft/server.properties
      - /opt/so/conf/kafka/client.properties:/kafka/config/kraft/client.properties
    - watch:
{% for sc in ['server', 'client'] %}
      - file: kafka_kraft_{{sc}}_properties
{% endfor %}

delete_so-kafka_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-kafka$

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
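Once this state applies, a quick hedged smoke test (port numbers taken from the listeners default in defaults.yaml above):

# confirm the container is running and the broker/controller ports are listening
docker ps --filter name=so-kafka --format '{{.Names}} {{.Status}}'
ss -lnt | grep -E ':(9092|9093) '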
@@ -3,5 +3,5 @@
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0. #}

{% import_yaml 'mysql/defaults.yaml' as MYSQLDEFAULTS with context %}
{% set MYSQLMERGED = salt['pillar.get']('mysql', MYSQLDEFAULTS.mysql, merge=True) %}
{% from 'kafka/map.jinja' import KAFKAMERGED -%}
{{ KAFKAMERGED.config.client | yaml(False) | replace("_x_", ".") }}
@@ -3,5 +3,5 @@
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0. #}

{% import_yaml 'soctopus/defaults.yaml' as SOCTOPUSDEFAULTS %}
{% set SOCTOPUSMERGED = salt['pillar.get']('soctopus', SOCTOPUSDEFAULTS.soctopus, merge=True) %}
{% from 'kafka/map.jinja' import KAFKAMERGED -%}
{{ KAFKAMERGED.config.server | yaml(False) | replace("_x_", ".") }}
@@ -3,12 +3,12 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'kafka/map.jinja' import KAFKAMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'playbook/map.jinja' import PLAYBOOKMERGED %}

include:
{% if PLAYBOOKMERGED.enabled %}
  - playbook.enabled
{% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %}
  - kafka.enabled
{% else %}
  - playbook.disabled
  - kafka.disabled
{% endif %}
20
salt/kafka/map.jinja
Normal file
@@ -0,0 +1,20 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0. #}

{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %}
{% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

{% do KAFKAMERGED.config.server.update({ 'node_x_id': salt['pillar.get']('kafka:nodes:' ~ GLOBALS.hostname ~ ':nodeid')}) %}
{% do KAFKAMERGED.config.server.update({'advertised_x_listeners': 'BROKER://' ~ GLOBALS.node_ip ~ ':9092'}) %}

{% set nodes = salt['pillar.get']('kafka:nodes', {}) %}
{% set combined = [] %}
{% for hostname, data in nodes.items() %}
{% do combined.append(data.nodeid ~ "@" ~ hostname ~ ":9093") %}
{% endfor %}
{% set kraft_controller_quorum_voters = ','.join(combined) %}

{% do KAFKAMERGED.config.server.update({'controller_x_quorum_x_voters': kraft_controller_quorum_voters}) %}
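The loop above flattens the kafka:nodes pillar into the KRaft voter string. A minimal sketch of the same assembly in shell, with hypothetical hostnames and node IDs:

# build controller.quorum.voters from nodeid@hostname pairs
voters=""
for entry in 1@manager 2@broker1 3@broker2; do
  voters="${voters:+$voters,}${entry}:9093"
done
echo "$voters"   # -> 1@manager:9093,2@broker1:9093,3@broker2:9093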
170
salt/kafka/soc_kafka.yaml
Normal file
@@ -0,0 +1,170 @@
kafka:
  enabled:
    description: Enable or disable Kafka.
    helpLink: kafka.html
  cluster_id:
    description: The ID of the Kafka cluster.
    readonly: True
    advanced: True
    sensitive: True
    helpLink: kafka.html
  config:
    server:
      advertised_x_listeners:
        description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
        title: advertised.listeners
        helpLink: kafka.html
      auto_x_create_x_topics_x_enable:
        description: Enable the automatic creation of topics.
        title: auto.create.topics.enable
        forcedType: bool
        helpLink: kafka.html
      controller_x_listener_x_names:
        description: Comma-separated list of the listeners used by the controller.
        title: controller.listener.names
        helpLink: kafka.html
      controller_x_quorum_x_voters:
        description: A comma-separated list of ID and endpoint information mapped for a set of voters.
        title: controller.quorum.voters
        helpLink: kafka.html
      inter_x_broker_x_listener_x_name:
        description: The name of the listener used for inter-broker communication.
        title: inter.broker.listener.name
        helpLink: kafka.html
      listeners:
        description: Comma-separated list of the URIs the broker listens on and their listener names.
        helpLink: kafka.html
      listener_x_security_x_protocol_x_map:
        description: Comma-separated mapping of listener names to security protocols.
        title: listener.security.protocol.map
        helpLink: kafka.html
      log_x_dirs:
        description: Where Kafka logs are stored within the Docker container.
        title: log.dirs
        helpLink: kafka.html
      log_x_retention_x_check_x_interval_x_ms:
        description: Frequency at which log files are checked to see whether they qualify for deletion.
        title: log.retention.check.interval.ms
        helpLink: kafka.html
      log_x_retention_x_hours:
        description: How long, in hours, a log file is kept.
        title: log.retention.hours
        forcedType: int
        helpLink: kafka.html
      log_x_segment_x_bytes:
        description: The maximum allowable size for a log file.
        title: log.segment.bytes
        forcedType: int
        helpLink: kafka.html
      node_x_id:
        description: The node ID associated with the roles this process plays when process.roles is populated.
        title: node.id
        forcedType: int
        readonly: True
        helpLink: kafka.html
      num_x_io_x_threads:
        description: The number of threads the server uses for processing requests, which may include disk I/O.
        title: num.io.threads
        forcedType: int
        helpLink: kafka.html
      num_x_network_x_threads:
        description: The number of threads used for network communication.
        title: num.network.threads
        forcedType: int
        helpLink: kafka.html
      num_x_partitions:
        description: The number of log partitions assigned per topic.
        title: num.partitions
        forcedType: int
        helpLink: kafka.html
      num_x_recovery_x_threads_x_per_x_data_x_dir:
        description: The number of threads used for log recovery at startup and flushing at shutdown. This number of threads is used per data directory.
        title: num.recovery.threads.per.data.dir
        forcedType: int
        helpLink: kafka.html
      offsets_x_topic_x_replication_x_factor:
        description: The offsets topic replication factor.
        title: offsets.topic.replication.factor
        forcedType: int
        helpLink: kafka.html
      process_x_roles:
        description: The roles the process performs. Use a comma-separated list if multiple.
        title: process.roles
        helpLink: kafka.html
      socket_x_receive_x_buffer_x_bytes:
        description: Size, in bytes, of the SO_RCVBUF buffer. A value of -1 will use the OS default.
        title: socket.receive.buffer.bytes
        #forcedType: int - soc needs to allow -1 as an int before we can use this
        helpLink: kafka.html
      socket_x_request_x_max_x_bytes:
        description: The maximum bytes allowed for a request to the socket.
        title: socket.request.max.bytes
        forcedType: int
        helpLink: kafka.html
      socket_x_send_x_buffer_x_bytes:
        description: Size, in bytes, of the SO_SNDBUF buffer. A value of -1 will use the OS default.
        title: socket.send.buffer.bytes
        #forcedType: int - soc needs to allow -1 as an int before we can use this
        helpLink: kafka.html
      ssl_x_keystore_x_location:
        description: The key store file location within the Docker container.
        title: ssl.keystore.location
        helpLink: kafka.html
      ssl_x_keystore_x_password:
        description: The key store file password. Invalid for PEM format.
        title: ssl.keystore.password
        sensitive: True
        helpLink: kafka.html
      ssl_x_keystore_x_type:
        description: The key store file format.
        title: ssl.keystore.type
        regex: ^(JKS|PKCS12|PEM)$
        helpLink: kafka.html
      ssl_x_truststore_x_location:
        description: The trust store file location within the Docker container.
        title: ssl.truststore.location
        helpLink: kafka.html
      ssl_x_truststore_x_password:
        description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
        title: ssl.truststore.password
        sensitive: True
        helpLink: kafka.html
      transaction_x_state_x_log_x_min_x_isr:
        description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees. For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write.
        title: transaction.state.log.min.isr
        forcedType: int
        helpLink: kafka.html
      transaction_x_state_x_log_x_replication_x_factor:
        description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite.
        title: transaction.state.log.replication.factor
        forcedType: int
        helpLink: kafka.html
    client:
      security_x_protocol:
        description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
        title: security.protocol
        regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)
        helpLink: kafka.html
      ssl_x_keystore_x_location:
        description: The key store file location within the Docker container.
        title: ssl.keystore.location
        helpLink: kafka.html
      ssl_x_keystore_x_password:
        description: The key store file password. Invalid for PEM format.
        title: ssl.keystore.password
        sensitive: True
        helpLink: kafka.html
      ssl_x_keystore_x_type:
        description: The key store file format.
        title: ssl.keystore.type
        regex: ^(JKS|PKCS12|PEM)$
        helpLink: kafka.html
      ssl_x_truststore_x_location:
        description: The trust store file location within the Docker container.
        title: ssl.truststore.location
        helpLink: kafka.html
      ssl_x_truststore_x_password:
        description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
        title: ssl.truststore.password
        sensitive: True
        helpLink: kafka.html
@@ -6,11 +6,11 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

append_so-mysql_so-status.conf:
append_so-kafka_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-mysql
    - unless: grep -q so-mysql /opt/so/conf/so-status/so-status.conf
    - text: so-kafka
    - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf

{% else %}

@@ -18,4 +18,4 @@ append_so-mysql_so-status.conf:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
{% endif %}
38
salt/kafka/storage.sls
Normal file
@@ -0,0 +1,38 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %}

{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
{% if kafka_cluster_id is none %}
generate_kafka_cluster_id:
  cmd.run:
    - name: /usr/sbin/so-kafka-clusterid
{% endif %}
{% endif %}

{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
kafka_storage_init:
  cmd.run:
    - name: |
        docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /kafka/config/kraft/newserver.properties
kafka_rm_kafkainit:
  cmd.run:
    - name: |
        docker rm so-kafkainit
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
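Formatting only happens once; meta.properties is the marker the guard above checks for. A hedged spot check after the first run (key names assumed from stock Kafka's KRaft metadata file):

# confirm KRaft storage was formatted with the pillar's cluster id
grep -E 'cluster\.id|node\.id' /nsm/kafka/data/meta.properties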
13
salt/kafka/tools/sbin_jinja/so-kafka-generate-keystore
Normal file
@@ -0,0 +1,13 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

# Generate a new keystore
docker run -v /etc/pki/kafka.p12:/etc/pki/kafka.p12 --name so-kafka-keystore --user root --entrypoint keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -importkeystore -srckeystore /etc/pki/kafka.p12 -srcstoretype PKCS12 -srcstorepass changeit -destkeystore /etc/pki/kafka.jks -deststoretype JKS -deststorepass changeit -noprompt
docker cp so-kafka-keystore:/etc/pki/kafka.jks /etc/pki/kafka.jks
docker rm so-kafka-keystore
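To confirm the PKCS12-to-JKS conversion succeeded, a minimal sketch (assumes a JDK keytool on the host; otherwise run it through the same container, as above):

# list the entries in the freshly converted keystore
keytool -list -keystore /etc/pki/kafka.jks -storetype JKS -storepass changeit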
@@ -75,9 +75,10 @@ so-logstash:
{% else %}
    - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode'] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
    - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
    - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
    - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
{% endif %}
{% if GLOBALS.role == 'so-eval' %}
    - /nsm/zeek:/nsm/zeek:ro
@@ -4,9 +4,10 @@
# Elastic License 2.0.

{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% from 'kafka/map.jinja' import KAFKAMERGED %}

include:
{% if LOGSTASH_MERGED.enabled %}
{% if LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %}
  - logstash.enabled
{% else %}
  - logstash.disabled
@@ -0,0 +1,35 @@
{% set kafka_brokers = salt['pillar.get']('logstash:nodes:receiver', {}) %}
{% set kafka_on_mngr = salt['pillar.get']('logstash:nodes:manager', {}) %}
{% set broker_ips = [] %}
{% for node, node_data in kafka_brokers.items() %}
{% do broker_ips.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% for node, node_data in kafka_on_mngr.items() %}
{% do broker_ips.append(node_data['ip'] + ":9092") %}
{% endfor %}
{% set bootstrap_servers = "','".join(broker_ips) %}

input {
  kafka {
    codec => json
    topics => ['default-logs', 'kratos-logs', 'soc-logs', 'strelka-logs', 'suricata-logs', 'zeek-logs']
    group_id => 'searchnodes'
    client_id => '{{ GLOBALS.hostname }}'
    security_protocol => 'SSL'
    bootstrap_servers => '{{ bootstrap_servers }}'
    ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12'
    ssl_keystore_password => 'changeit'
    ssl_keystore_type => 'PKCS12'
    ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts'
    ssl_truststore_password => 'changeit'
    decorate_events => true
    tags => [ "elastic-agent", "input-{{ GLOBALS.hostname }}", "kafka" ]
  }
}
filter {
  if ![metadata] {
    mutate {
      rename => { "@metadata" => "metadata" }
    }
  }
}
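A hedged way to confirm records are reaching a topic before Logstash consumes them (script and config paths assumed from the /kafka layout used elsewhere in this diff):

# read a few records from one of the topics listed above
docker exec so-kafka /kafka/bin/kafka-console-consumer.sh \
  --bootstrap-server localhost:9092 \
  --consumer.config /kafka/config/kraft/client.properties \
  --topic zeek-logs --from-beginning --max-messages 5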
@@ -27,6 +27,15 @@ repo_log_dir:
      - user
      - group

agents_log_dir:
  file.directory:
    - name: /opt/so/log/agents
    - user: root
    - group: root
    - recurse:
      - user
      - group

yara_log_dir:
  file.directory:
    - name: /opt/so/log/yarasync
@@ -101,6 +110,17 @@ so-repo-sync:
    - hour: '{{ MANAGERMERGED.reposync.hour }}'
    - minute: '{{ MANAGERMERGED.reposync.minute }}'

so_fleetagent_status:
  cron.present:
    - name: /usr/sbin/so-elasticagent-status > /opt/so/log/agents/agentstatus.log 2>&1
    - identifier: so_fleetagent_status
    - user: root
    - minute: '*/5'
    - hour: '*'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'

socore_own_saltstack:
  file.directory:
    - name: /opt/so/saltstack
@@ -117,51 +137,6 @@ rules_dir:
    - group: socore
    - makedirs: True

{% if STRELKAMERGED.rules.enabled %}
strelkarepos:
  file.managed:
    - name: /opt/so/conf/strelka/repos.txt
    - source: salt://strelka/rules/repos.txt.jinja
    - template: jinja
    - defaults:
        STRELKAREPOS: {{ STRELKAMERGED.rules.repos }}
    - makedirs: True
strelka-yara-update:
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
  cron.present:
{% else %}
  cron.absent:
{% endif %}
    - user: socore
    - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1'
    - identifier: strelka-yara-update
    - hour: '7'
    - minute: '1'
strelka-yara-download:
{% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %}
  cron.present:
{% else %}
  cron.absent:
{% endif %}
    - user: socore
    - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1'
    - identifier: strelka-yara-download
    - hour: '7'
    - minute: '1'
{% if not GLOBALS.airgap %}
update_yara_rules:
  cmd.run:
    - name: /usr/sbin/so-yara-update
    - onchanges:
      - file: yara_update_scripts
download_yara_rules:
  cmd.run:
    - name: /usr/sbin/so-yara-download
    - onchanges:
      - file: yara_update_scripts
{% endif %}
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
@@ -20,10 +20,6 @@ manager:
    description: String of hosts to ignore the proxy settings for.
    global: True
    helpLink: proxy.html
  playbook:
    description: Enable playbook 1=enabled 0=disabled.
    global: True
    helpLink: playbook.html
  proxy:
    description: Proxy server to use for updates.
    global: True
4
salt/playbook/tools/sbin/so-playbook-restart → salt/manager/tools/sbin/so-elasticagent-status
Executable file → Normal file
@@ -5,8 +5,6 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

/usr/sbin/so-restart playbook $1
curl -s -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agent_status" | jq .
29
salt/manager/tools/sbin/so-kafka-clusterid
Normal file
@@ -0,0 +1,29 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


### THIS SCRIPT, AND ALL SALT STATE REFERENCES TO IT, IS TO BE REMOVED ONCE INITIAL TESTING IS DONE - THESE VALUES WILL BE GENERATED IN SETUP AND SOUP


local_salt_dir=/opt/so/saltstack/local

if [[ -f /usr/sbin/so-common ]]; then
  source /usr/sbin/so-common
else
  source $(dirname $0)/../../../common/tools/sbin/so-common
fi

if ! grep -q "^  cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
  kafka_cluster_id=$(get_random_value 22)
  echo 'kafka: ' > $local_salt_dir/pillar/kafka/soc_kafka.sls
  echo '  cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
fi

if ! grep -q "^  kafkapass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
  kafkapass=$(get_random_value)
  echo '  kafkapass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
fi
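After a first run, the pillar file should hold both generated values (output illustrative; get_random_value comes from so-common):

cat /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
# kafka:
#   cluster_id: AbCdEf0123456789AbCdEf
#   kafkapass: <random value>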
@@ -286,12 +286,6 @@ function add_sensor_to_minion() {
  echo "  " >> $PILLARFILE
}

function add_playbook_to_minion() {
  printf '%s\n'\
    "playbook:"\
    "  enabled: True"\
    "  " >> $PILLARFILE
}

function add_elastalert_to_minion() {
  printf '%s\n'\
@@ -353,13 +347,6 @@ function add_nginx_to_minion() {
    "  " >> $PILLARFILE
}

function add_soctopus_to_minion() {
  printf '%s\n'\
    "soctopus:"\
    "  enabled: True"\
    "  " >> $PILLARFILE
}

function add_soc_to_minion() {
  printf '%s\n'\
    "soc:"\
@@ -374,13 +361,6 @@ function add_registry_to_minion() {
    "  " >> $PILLARFILE
}

function add_mysql_to_minion() {
  printf '%s\n'\
    "mysql:"\
    "  enabled: True"\
    "  " >> $PILLARFILE
}

function add_kratos_to_minion() {
  printf '%s\n'\
    "kratos:"\
@@ -456,16 +436,13 @@ function createEVAL() {
  add_elasticsearch_to_minion
  add_sensor_to_minion
  add_strelka_to_minion
  add_playbook_to_minion
  add_elastalert_to_minion
  add_kibana_to_minion
  add_telegraf_to_minion
  add_influxdb_to_minion
  add_nginx_to_minion
  add_soctopus_to_minion
  add_soc_to_minion
  add_registry_to_minion
  add_mysql_to_minion
  add_kratos_to_minion
  add_idstools_to_minion
  add_elastic_fleet_package_registry_to_minion
@@ -478,17 +455,14 @@ function createSTANDALONE() {
  add_logstash_to_minion
  add_sensor_to_minion
  add_strelka_to_minion
  add_playbook_to_minion
  add_elastalert_to_minion
  add_kibana_to_minion
  add_redis_to_minion
  add_telegraf_to_minion
  add_influxdb_to_minion
  add_nginx_to_minion
  add_soctopus_to_minion
  add_soc_to_minion
  add_registry_to_minion
  add_mysql_to_minion
  add_kratos_to_minion
  add_idstools_to_minion
  add_elastic_fleet_package_registry_to_minion
@@ -497,17 +471,14 @@ function createSTANDALONE() {
function createMANAGER() {
  add_elasticsearch_to_minion
  add_logstash_to_minion
  add_playbook_to_minion
  add_elastalert_to_minion
  add_kibana_to_minion
  add_redis_to_minion
  add_telegraf_to_minion
  add_influxdb_to_minion
  add_nginx_to_minion
  add_soctopus_to_minion
  add_soc_to_minion
  add_registry_to_minion
  add_mysql_to_minion
  add_kratos_to_minion
  add_idstools_to_minion
  add_elastic_fleet_package_registry_to_minion
@@ -516,17 +487,14 @@ function createMANAGER() {
function createMANAGERSEARCH() {
  add_elasticsearch_to_minion
  add_logstash_to_minion
  add_playbook_to_minion
  add_elastalert_to_minion
  add_kibana_to_minion
  add_redis_to_minion
  add_telegraf_to_minion
  add_influxdb_to_minion
  add_nginx_to_minion
  add_soctopus_to_minion
  add_soc_to_minion
  add_registry_to_minion
  add_mysql_to_minion
  add_kratos_to_minion
  add_idstools_to_minion
  add_elastic_fleet_package_registry_to_minion
@@ -17,13 +17,16 @@ def showUsage(args):
    print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
    print('  General commands:')
    print('    append   - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
    print('    add      - Add a new key and set its value. Fails if the key already exists. Requires KEY and VALUE args.')
    print('    remove   - Removes a yaml key, if it exists. Requires KEY arg.')
    print('    replace  - Replaces (or adds) a key and sets its value. Requires KEY and VALUE args.')
    print('    help     - Prints this usage information.')
    print('')
    print('  Where:')
    print('    YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
    print('    KEY       - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
    print('    LISTITEM  - Item to add to the list.')
    print('    VALUE     - Value to set for a given key')
    print('    LISTITEM  - Item to append to a given key\'s list value')
    sys.exit(1)
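Hedged usage of the two new commands alongside the existing ones (file path illustrative):

so-yaml.py add /opt/so/conf/service/conf.yaml level1.newkey 123
so-yaml.py replace /opt/so/conf/service/conf.yaml level1.level2 true
so-yaml.py append /opt/so/conf/service/conf.yaml level1.mylist item1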
@@ -37,6 +40,7 @@ def writeYaml(filename, content):
    file = open(filename, "w")
    return yaml.dump(content, file)


def appendItem(content, key, listItem):
    pieces = key.split(".", 1)
    if len(pieces) > 1:
@@ -51,6 +55,30 @@ def appendItem(content, key, listItem):
        print("The key provided does not exist. No action was taken on the file.")
        return 1


def convertType(value):
    if len(value) > 0 and (not value.startswith("0") or len(value) == 1):
        if "." in value:
            try:
                value = float(value)
                return value
            except ValueError:
                pass

        try:
            value = int(value)
            return value
        except ValueError:
            pass

    lowered_value = value.lower()
    if lowered_value == "false":
        return False
    elif lowered_value == "true":
        return True
    return value
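convertType is what lets add, replace, and append store typed values instead of bare strings. A hedged demonstration through the CLI (file path illustrative):

echo '{}' > /tmp/demo.yaml
so-yaml.py add /tmp/demo.yaml retention 168   # stored as the integer 168
so-yaml.py add /tmp/demo.yaml enabled true    # stored as the boolean true
so-yaml.py add /tmp/demo.yaml build 0123      # leading zero, kept as the string "0123"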
def append(args):
    if len(args) != 3:
        print('Missing filename, key arg, or list item to append', file=sys.stderr)
@@ -62,11 +90,41 @@ def append(args):
    listItem = args[2]

    content = loadYaml(filename)
    appendItem(content, key, listItem)
    appendItem(content, key, convertType(listItem))
    writeYaml(filename, content)

    return 0

def addKey(content, key, value):
    pieces = key.split(".", 1)
    if len(pieces) > 1:
        if not pieces[0] in content:
            content[pieces[0]] = {}
        addKey(content[pieces[0]], pieces[1], value)
    elif key in content:
        raise KeyError("key already exists")
    else:
        content[key] = value


def add(args):
    if len(args) != 3:
        print('Missing filename, key arg, and/or value', file=sys.stderr)
        showUsage(None)
        return

    filename = args[0]
    key = args[1]
    value = args[2]

    content = loadYaml(filename)
    addKey(content, key, convertType(value))
    writeYaml(filename, content)

    return 0

def removeKey(content, key):
    pieces = key.split(".", 1)
    if len(pieces) > 1:
@@ -91,6 +149,24 @@ def remove(args):
    return 0

def replace(args):
    if len(args) != 3:
        print('Missing filename, key arg, and/or value', file=sys.stderr)
        showUsage(None)
        return

    filename = args[0]
    key = args[1]
    value = args[2]

    content = loadYaml(filename)
    removeKey(content, key)
    addKey(content, key, convertType(value))
    writeYaml(filename, content)

    return 0

def main():
    args = sys.argv[1:]

@@ -100,8 +176,10 @@ def main():

    commands = {
        "help": showUsage,
        "add": add,
        "append": append,
        "remove": remove,
        "replace": replace,
    }

    code = 1
@@ -42,6 +42,14 @@ class TestRemove(unittest.TestCase):
            sysmock.assert_called()
            self.assertIn(mock_stdout.getvalue(), "Usage:")

    def test_remove_missing_arg(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "help"]
                soyaml.remove(["file"])
                sysmock.assert_called()
                self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")

    def test_remove(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
@@ -106,6 +114,14 @@ class TestRemove(unittest.TestCase):
            sysmock.assert_called_once_with(1)
            self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")

    def test_append_missing_arg(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "help"]
                soyaml.append(["file", "key"])
                sysmock.assert_called()
                self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n")

    def test_append(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        file = open(filename, "w")
@@ -201,3 +217,146 @@ class TestRemove(unittest.TestCase):
            soyaml.main()
            sysmock.assert_called()
            self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")

    def test_add_key(self):
        content = {}
        soyaml.addKey(content, "foo", 123)
        self.assertEqual(content, {"foo": 123})

        try:
            soyaml.addKey(content, "foo", "bar")
            self.fail("expected key error since key already exists")
        except KeyError:
            pass

        try:
            soyaml.addKey(content, "foo.bar", 123)
            self.fail("expected type error since key parent value is not a map")
        except TypeError:
            pass

        content = {}
        soyaml.addKey(content, "foo", "bar")
        self.assertEqual(content, {"foo": "bar"})

        soyaml.addKey(content, "badda.badda", "boom")
        self.assertEqual(content, {"foo": "bar", "badda": {"badda": "boom"}})

    def test_add_missing_arg(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "help"]
                soyaml.add(["file", "key"])
                sysmock.assert_called()
                self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")

    def test_add(self):
        filename = "/tmp/so-yaml_test-add.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
        file.close()

        soyaml.add([filename, "key4", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()
        expected = "key1:\n  child1: 123\n  child2: abc\nkey2: false\nkey3:\n- a\n- b\n- c\nkey4: d\n"
        self.assertEqual(actual, expected)

    def test_add_nested(self):
        filename = "/tmp/so-yaml_test-add.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.add([filename, "key1.child3", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2:\n  - a\n  - b\n  - c\n  child3: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_add_nested_deep(self):
        filename = "/tmp/so-yaml_test-add.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.add([filename, "key1.child2.deep2", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2:\n    deep1: 45\n    deep2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_replace_missing_arg(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "help"]
                soyaml.replace(["file", "key"])
                sysmock.assert_called()
                self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")

    def test_replace(self):
        filename = "/tmp/so-yaml_test-add.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: abc }, key2: false, key3: [a,b,c]}")
        file.close()

        soyaml.replace([filename, "key2", True])

        file = open(filename, "r")
        actual = file.read()
        file.close()
        expected = "key1:\n  child1: 123\n  child2: abc\nkey2: true\nkey3:\n- a\n- b\n- c\n"
        self.assertEqual(actual, expected)

    def test_replace_nested(self):
        filename = "/tmp/so-yaml_test-add.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: [a,b,c] }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.replace([filename, "key1.child2", "d"])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2: d\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_replace_nested_deep(self):
        filename = "/tmp/so-yaml_test-add.yaml"
        file = open(filename, "w")
        file.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
        file.close()

        soyaml.replace([filename, "key1.child2.deep1", 46])

        file = open(filename, "r")
        actual = file.read()
        file.close()

        expected = "key1:\n  child1: 123\n  child2:\n    deep1: 46\nkey2: false\nkey3:\n- e\n- f\n- g\n"
        self.assertEqual(actual, expected)

    def test_convert(self):
        self.assertEqual(soyaml.convertType("foo"), "foo")
        self.assertEqual(soyaml.convertType("foo.bar"), "foo.bar")
        self.assertEqual(soyaml.convertType("123"), 123)
        self.assertEqual(soyaml.convertType("0"), 0)
        self.assertEqual(soyaml.convertType("00"), "00")
        self.assertEqual(soyaml.convertType("0123"), "0123")
        self.assertEqual(soyaml.convertType("123.456"), 123.456)
        self.assertEqual(soyaml.convertType("0123.456"), "0123.456")
        self.assertEqual(soyaml.convertType("true"), True)
        self.assertEqual(soyaml.convertType("TRUE"), True)
        self.assertEqual(soyaml.convertType("false"), False)
        self.assertEqual(soyaml.convertType("FALSE"), False)
        self.assertEqual(soyaml.convertType(""), "")
@@ -229,7 +229,7 @@ check_local_mods() {
# {% endraw %}

check_pillar_items() {
  local pillar_output=$(salt-call pillar.items --out=json)
  local pillar_output=$(salt-call pillar.items -lerror --out=json)

  cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
  if [[ "$cond" == "true" ]]; then
@@ -357,6 +357,7 @@ preupgrade_changes() {
  [[ "$INSTALLEDVERSION" == 2.4.30 ]] && up_to_2.4.40
  [[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
  [[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
  [[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
  true
}
@@ -373,6 +374,7 @@ postupgrade_changes() {
  [[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
  [[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
  [[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
  [[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
  true
}
@@ -435,6 +437,29 @@ post_to_2.4.60() {
  POSTVERSION=2.4.60
}

post_to_2.4.70() {
  # Global pipeline changes to REDIS or KAFKA
  echo "Removing global.pipeline pillar configuration"
  sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls

  # Kafka configuration
  mkdir -p /opt/so/saltstack/local/pillar/kafka
  touch /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
  touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
  echo 'kafka: ' > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls

  if ! grep -q "^  cluster_id:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
    kafka_cluster_id=$(get_random_value 22)
    echo '  cluster_id: '$kafka_cluster_id >> $local_salt_dir/pillar/kafka/soc_kafka.sls
  fi

  if ! grep -q "^  certpass:" $local_salt_dir/pillar/kafka/soc_kafka.sls; then
    kafkapass=$(get_random_value)
    echo '  certpass: '$kafkapass >> $local_salt_dir/pillar/kafka/soc_kafka.sls
  fi

  POSTVERSION=2.4.70
}

repo_sync() {
  echo "Sync the local repo."
  su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -574,6 +599,116 @@ up_to_2.4.60() {
  INSTALLEDVERSION=2.4.60
}

up_to_2.4.70() {
  playbook_migration
  toggle_telemetry
  INSTALLEDVERSION=2.4.70
}

toggle_telemetry() {
  if [[ -z $UNATTENDED && $is_airgap -ne 0 ]]; then
    cat << ASSIST_EOF

--------------- SOC Telemetry ---------------

The Security Onion development team could use your help! Enabling SOC
Telemetry will help the team understand which UI features are being
used and enables informed prioritization of future development.

Adjust this setting at any time via the SOC Configuration screen.

Documentation: https://docs.securityonion.net/en/2.4/telemetry.html

ASSIST_EOF

    echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "

    read -r input
    input=$(echo "${input,,}" | xargs echo -n)
    echo ""
    if [[ ${#input} -eq 0 || "$input" == "yes" || "$input" == "y" || "$input" == "yy" ]]; then
      echo "Thank you for helping improve Security Onion!"
    else
      if so-yaml.py replace /opt/so/saltstack/local/pillar/soc/soc_soc.sls soc.telemetryEnabled false; then
        echo "Disabled SOC Telemetry."
      else
        fail "Failed to disable SOC Telemetry; aborting."
      fi
    fi
    echo ""
  fi
}

playbook_migration() {
  # Start SOC Detections migration
  mkdir -p /nsm/backup/detections-migration/{suricata,sigma/rules,elastalert}

  # Remove cronjobs
  crontab -l | grep -v 'so-playbook-sync_cron' | crontab -
  crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -

  if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then

    # Check for active Elastalert rules
    active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f -name "*.yaml" | wc -l)

    if [[ "$active_rules_count" -gt 0 ]]; then
      # Prompt the user to AGREE if active Elastalert rules are found
      echo
      echo "$active_rules_count active Elastalert/Playbook rules found."
      echo "In preparation for the new Detections module, they will be backed up and then disabled."
      echo
      echo "If you would like to proceed, then type AGREE and press ENTER."
      echo
      # Read user input
      read INPUT
      if [ "${INPUT^^}" != 'AGREE' ]; then fail "SOUP canceled."; fi

      echo "Backing up the Elastalert rules..."
      rsync -av --stats /opt/so/rules/elastalert/playbook/*.yaml /nsm/backup/detections-migration/elastalert/

      # Verify that rsync completed successfully
      if [[ $? -eq 0 ]]; then
        # Delete the Elastalert rules
        rm -f /opt/so/rules/elastalert/playbook/*.yaml
        echo "Active Elastalert rules have been backed up."
      else
        fail "Error: rsync failed to copy the files. Active Elastalert rules have not been backed up."
      fi
    fi

    echo
    echo "Exporting Sigma rules from Playbook..."
    MYSQLPW=$(awk '/mysql:/ {print $2}' /opt/so/saltstack/local/pillar/secrets.sls)

    docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT id, value FROM custom_values WHERE value LIKE '%View Sigma%'\"" | while read -r id value; do
      echo -e "$value" > "/nsm/backup/detections-migration/sigma/rules/$id.yaml"
    done || fail "Failed to export Sigma rules..."

    echo
    echo "Exporting Sigma Filters from Playbook..."
    docker exec so-mysql sh -c "exec mysql -uroot -p${MYSQLPW} -D playbook -sN -e \"SELECT issues.subject as title, custom_values.value as filter FROM issues JOIN custom_values ON issues.id = custom_values.customized_id WHERE custom_values.value LIKE '%sofilter%'\"" > /nsm/backup/detections-migration/sigma/custom-filters.txt || fail "Failed to export custom Sigma Filters."

    echo
    echo "Backing up the Playbook database..."
    docker exec so-mysql sh -c "mysqldump -uroot -p${MYSQLPW} --databases playbook > /tmp/playbook-dump" || fail "Failed to dump the Playbook database."
    docker cp so-mysql:/tmp/playbook-dump /nsm/backup/detections-migration/sigma/playbook-dump.sql || fail "Failed to back up the Playbook database."
  fi

  echo
  echo "Stopping Playbook services & cleaning up..."
  for container in so-playbook so-mysql so-soctopus; do
    if [ -n "$(docker ps -q -f name=^${container}$)" ]; then
      docker stop $container
    fi
  done
  sed -i '/so-playbook\|so-soctopus\|so-mysql/d' /opt/so/conf/so-status/so-status.conf
  rm -f /usr/sbin/so-playbook-* /usr/sbin/so-soctopus-* /usr/sbin/so-mysql-*

  echo
  echo "Playbook migration is complete."
}
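A hedged spot check after the migration: the Sigma export writes one YAML file per rule id, so the backup tree should be populated:

ls /nsm/backup/detections-migration/sigma/rules | head
ls /nsm/backup/detections-migration/elastalert | head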

determine_elastic_agent_upgrade() {
  if [[ $is_airgap -eq 0 ]]; then
    update_elastic_agent_airgap
@@ -756,7 +891,7 @@ verify_latest_update_script() {
  else
    echo "You are not running the latest soup version. Updating soup and its components. This might take multiple runs to complete."

    salt-call state.apply common.soup_scripts queue=True -linfo --file-root=$UPDATE_DIR/salt --local
    salt-call state.apply common.soup_scripts queue=True -lerror --file-root=$UPDATE_DIR/salt --local --out-file=/dev/null

    # Verify that soup scripts updated as expected
    get_soup_script_hashes
@@ -837,7 +972,6 @@ main() {

  echo "### Preparing soup at $(date) ###"
  echo ""

  set_os

  check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
@@ -1,89 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}

# MySQL Setup
mysqlpkgs:
  pkg.removed:
    - skip_suggestions: False
    - pkgs:
{% if grains['os_family'] != 'RedHat' %}
      - python3-mysqldb
{% else %}
      - python3-mysqlclient
{% endif %}

mysqletcdir:
  file.directory:
    - name: /opt/so/conf/mysql/etc
    - user: 939
    - group: 939
    - makedirs: True

mysqlpiddir:
  file.directory:
    - name: /opt/so/conf/mysql/pid
    - user: 939
    - group: 939
    - makedirs: True

mysqlcnf:
  file.managed:
    - name: /opt/so/conf/mysql/etc/my.cnf
    - source: salt://mysql/etc/my.cnf
    - user: 939
    - group: 939

mysqlpass:
  file.managed:
    - name: /opt/so/conf/mysql/etc/mypass
    - source: salt://mysql/etc/mypass
    - user: 939
    - group: 939
    - template: jinja
    - defaults:
        MYSQLPASS: {{ MYSQLPASS }}

mysqllogdir:
  file.directory:
    - name: /opt/so/log/mysql
    - user: 939
    - group: 939
    - makedirs: True

mysqldatadir:
  file.directory:
    - name: /nsm/mysql
    - user: 939
    - group: 939
    - makedirs: True

mysql_sbin:
  file.recurse:
    - name: /usr/sbin
    - source: salt://mysql/tools/sbin
    - user: 939
    - group: 939
    - file_mode: 755

#mysql_sbin_jinja:
#  file.recurse:
#    - name: /usr/sbin
#    - source: salt://mysql/tools/sbin_jinja
#    - user: 939
#    - group: 939
#    - file_mode: 755
#    - template: jinja

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,2 +0,0 @@
mysql:
  enabled: False
@@ -1,27 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

include:
  - mysql.sostatus

so-mysql:
  docker_container.absent:
    - force: True

so-mysql_so-status.disabled:
  file.comment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-mysql$

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,84 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}

include:
  - mysql.config
  - mysql.sostatus

{% if MYSQLPASS == None %}

mysql_password_none:
  test.configurable_test_state:
    - changes: False
    - result: False
    - comment: "MySQL Password Error - Not Starting MySQL"

{% else %}

so-mysql:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-mysql:{{ GLOBALS.so_version }}
    - hostname: so-mysql
    - user: socore
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-mysql'].ip }}
    - extra_hosts:
      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-mysql'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-mysql'].extra_hosts %}
      - {{ XTRAHOST }}
{% endfor %}
{% endif %}
    - port_bindings:
{% for BINDING in DOCKER.containers['so-mysql'].port_bindings %}
      - {{ BINDING }}
{% endfor %}
    - environment:
      - MYSQL_ROOT_HOST={{ GLOBALS.so_docker_gateway }}
      - MYSQL_ROOT_PASSWORD=/etc/mypass
{% if DOCKER.containers['so-mysql'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-mysql'].extra_env %}
      - {{ XTRAENV }}
{% endfor %}
{% endif %}
    - binds:
      - /opt/so/conf/mysql/etc/my.cnf:/etc/my.cnf:ro
      - /opt/so/conf/mysql/etc/mypass:/etc/mypass
      - /nsm/mysql:/var/lib/mysql:rw
      - /opt/so/log/mysql:/var/log/mysql:rw
{% if DOCKER.containers['so-mysql'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-mysql'].custom_bind_mounts %}
      - {{ BIND }}
{% endfor %}
{% endif %}
    - cap_add:
      - SYS_NICE
    - watch:
      - file: mysqlcnf
      - file: mysqlpass
    - require:
      - file: mysqlcnf
      - file: mysqlpass
{% endif %}

delete_so-mysql_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-mysql$

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,32 +0,0 @@
# For advice on how to change settings please see
# http://dev.mysql.com/doc/refman/5.7/en/server-configuration-defaults.html

[mysqld]
#
# Remove leading # and set to the amount of RAM for the most important data
# cache in MySQL. Start at 70% of total RAM for dedicated server, else 10%.
# innodb_buffer_pool_size = 128M
#
# Remove leading # to turn on a very important data integrity option: logging
# changes to the binary log between backups.
# log_bin
#
# Remove leading # to set options mainly useful for reporting servers.
# The server defaults are faster for transactions and fast SELECTs.
# Adjust sizes as needed, experiment to find the optimal values.
# join_buffer_size = 128M
# sort_buffer_size = 2M
# read_rnd_buffer_size = 2M

host_cache_size=0
skip-name-resolve
datadir=/var/lib/mysql
socket=/var/lib/mysql/mysql.sock
secure-file-priv=/var/lib/mysql-files
user=socore

log-error=/var/log/mysql/mysqld.log
pid-file=/var/run/mysqld/mysqld.pid

# Switch back to the native password module so that playbook can connect
authentication_policy=mysql_native_password
@@ -1 +0,0 @@
{{ MYSQLPASS }}
@@ -1,4 +0,0 @@
mysql:
  enabled:
    description: You can enable or disable MySQL.
    advanced: True
@@ -1,12 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

/usr/sbin/so-restart mysql $1
@@ -1,12 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

/usr/sbin/so-start mysql $1
@@ -1,12 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.


. /usr/sbin/so-common

/usr/sbin/so-stop mysql $1
@@ -277,38 +277,11 @@ http {
      proxy_set_header X-Forwarded-Proto $scheme;
    }

    location /playbook/ {
      auth_request /auth/sessions/whoami;
      proxy_pass http://{{ GLOBALS.manager }}:3000/playbook/;
      proxy_read_timeout 90;
      proxy_connect_timeout 90;
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header Proxy "";
      proxy_set_header X-Forwarded-Proto $scheme;
    }


    location /soctopus/ {
      auth_request /auth/sessions/whoami;
      proxy_pass http://{{ GLOBALS.manager }}:7000/;
      proxy_read_timeout 300;
      proxy_connect_timeout 300;
      proxy_set_header Host $host;
      proxy_set_header X-Real-IP $remote_addr;
      proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
      proxy_set_header Proxy "";
      proxy_set_header X-Forwarded-Proto $scheme;
    }

    location /kibana/app/soc/ {
      rewrite ^/kibana/app/soc/(.*) /soc/$1 permanent;
    }

    location /kibana/app/soctopus/ {
      rewrite ^/kibana/app/soctopus/(.*) /soctopus/$1 permanent;
    }

    location /sensoroniagents/ {
      if ($http_authorization = "") {
@@ -1,19 +0,0 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

# This state will create the SecOps Automation user within Playbook

include:
  - playbook

wait_for_playbook:
  cmd.run:
    - name: until nc -z {{ GLOBALS.manager }} 3000; do sleep 1; done
    - timeout: 300

create_user:
  cmd.script:
    - source: salt://playbook/files/automation_user_create.sh
    - cwd: /root
    - template: jinja
    - onchanges:
      - cmd: wait_for_playbook
@@ -1,120 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set MYSQLPASS = salt['pillar.get']('secrets:mysql') %}
{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %}


include:
  - mysql

create_playbookdbuser:
  mysql_user.present:
    - name: playbookdbuser
    - password: {{ PLAYBOOKPASS }}
    - host: "{{ DOCKER.range.split('/')[0] }}/255.255.255.0"
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}

query_playbookdbuser_grants:
  mysql_query.run:
    - database: playbook
    - query: "GRANT ALL ON playbook.* TO 'playbookdbuser'@'{{ DOCKER.range.split('/')[0] }}/255.255.255.0';"
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}

query_updatwebhooks:
  mysql_query.run:
    - database: playbook
    - query: "update webhooks set url = 'http://{{ GLOBALS.manager_ip }}:7000/playbook/webhook' where project_id = 1"
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}

query_updatename:
  mysql_query.run:
    - database: playbook
    - query: "update custom_fields set name = 'Custom Filter' where id = 21;"
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}

query_updatepluginurls:
  mysql_query.run:
    - database: playbook
    - query: |-
        update settings set value =
        "--- !ruby/hash:ActiveSupport::HashWithIndifferentAccess
        project: '1'
        convert_url: http://{{ GLOBALS.manager }}:7000/playbook/sigmac
        create_url: http://{{ GLOBALS.manager }}:7000/playbook/play"
        where id = 43
    - connection_host: {{ GLOBALS.manager }}
    - connection_port: 3306
    - connection_user: root
    - connection_pass: {{ MYSQLPASS }}

playbook_sbin:
  file.recurse:
    - name: /usr/sbin
    - source: salt://playbook/tools/sbin
    - user: 939
    - group: 939
    - file_mode: 755

#playbook_sbin_jinja:
#  file.recurse:
#    - name: /usr/sbin
#    - source: salt://playbook/tools/sbin_jinja
#    - user: 939
#    - group: 939
#    - file_mode: 755
#    - template: jinja

playbooklogdir:
  file.directory:
    - name: /opt/so/log/playbook
    - dir_mode: 775
    - user: 939
    - group: 939
    - makedirs: True

playbookfilesdir:
  file.directory:
    - name: /opt/so/conf/playbook/redmine-files
    - dir_mode: 775
    - user: 939
    - group: 939
    - makedirs: True

{% if 'idh' in salt['cmd.shell']("ls /opt/so/saltstack/local/pillar/minions/|awk -F'_' {'print $2'}|awk -F'.' {'print $1'}").split() %}
idh-plays:
  file.recurse:
    - name: /opt/so/conf/soctopus/sigma-import
    - source: salt://idh/plays
    - makedirs: True
  cmd.run:
    - name: so-playbook-import True
    - onchanges:
      - file: /opt/so/conf/soctopus/sigma-import
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
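For reference, if DOCKER.range were a hypothetical 172.17.0.0/24, the query_playbookdbuser_grants state above would render and run:

GRANT ALL ON playbook.* TO 'playbookdbuser'@'172.17.0.0/255.255.255.0';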
@@ -1,14 +0,0 @@

# This state will import the initial default playbook database.
# If there is an existing playbook database, it will be overwritten - no backups are made.

include:
  - mysql

salt://playbook/files/playbook_db_init.sh:
  cmd.script:
    - cwd: /root
    - template: jinja

'sleep 5':
  cmd.run
@@ -1,2 +0,0 @@
playbook:
  enabled: False
@@ -1,37 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

include:
  - playbook.sostatus

so-playbook:
  docker_container.absent:
    - force: True

so-playbook_so-status.disabled:
  file.comment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-playbook$

so-playbook-sync_cron:
  cron.absent:
    - identifier: so-playbook-sync_cron
    - user: root

so-playbook-ruleupdate_cron:
  cron.absent:
    - identifier: so-playbook-ruleupdate_cron
    - user: root

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,93 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set PLAYBOOKPASS = salt['pillar.get']('secrets:playbook_db') %}

include:
  - playbook.config
  - playbook.sostatus

{% if PLAYBOOKPASS == None %}

playbook_password_none:
  test.configurable_test_state:
    - changes: False
    - result: False
    - comment: "Playbook MySQL Password Error - Not Starting Playbook"

{% else %}

so-playbook:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-playbook:{{ GLOBALS.so_version }}
    - hostname: playbook
    - name: so-playbook
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-playbook'].ip }}
    - binds:
      - /opt/so/conf/playbook/redmine-files:/usr/src/redmine/files:rw
      - /opt/so/log/playbook:/playbook/log:rw
{% if DOCKER.containers['so-playbook'].custom_bind_mounts %}
{% for BIND in DOCKER.containers['so-playbook'].custom_bind_mounts %}
      - {{ BIND }}
{% endfor %}
{% endif %}
    - extra_hosts:
      - {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
{% if DOCKER.containers['so-playbook'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-playbook'].extra_hosts %}
      - {{ XTRAHOST }}
{% endfor %}
{% endif %}
    - environment:
      - REDMINE_DB_MYSQL={{ GLOBALS.manager }}
      - REDMINE_DB_DATABASE=playbook
      - REDMINE_DB_USERNAME=playbookdbuser
      - REDMINE_DB_PASSWORD={{ PLAYBOOKPASS }}
{% if DOCKER.containers['so-playbook'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-playbook'].extra_env %}
      - {{ XTRAENV }}
{% endfor %}
{% endif %}
    - port_bindings:
{% for BINDING in DOCKER.containers['so-playbook'].port_bindings %}
      - {{ BINDING }}
{% endfor %}

delete_so-playbook_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-playbook$

so-playbook-sync_cron:
  cron.present:
    - name: /usr/sbin/so-playbook-sync > /opt/so/log/playbook/sync.log 2>&1
    - identifier: so-playbook-sync_cron
    - user: root
    - minute: '*/5'

so-playbook-ruleupdate_cron:
  cron.present:
    - name: /usr/sbin/so-playbook-ruleupdate > /opt/so/log/playbook/update.log 2>&1
    - identifier: so-playbook-ruleupdate_cron
    - user: root
    - minute: '1'
    - hour: '6'

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,49 +0,0 @@
#!/bin/bash
# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) -%}
# {%- set automation_pass = salt['pillar.get']('secrets:playbook_automation', None) %}

local_salt_dir=/opt/so/saltstack/local

try_count=1
interval=10

while [[ $try_count -le 6 ]]; do
  if docker top "so-playbook" &>/dev/null; then
    automation_group=6

    # Create user and retrieve api_key and user_id from response
    mapfile -t automation_res < <(
      curl -s --location --request POST 'http://127.0.0.1:3000/playbook/users.json' --user "admin:{{ admin_pass }}" --header 'Content-Type: application/json' --data '{
        "user" : {
          "login" : "automation",
          "password": "{{ automation_pass }}",
          "firstname": "SecOps",
          "lastname": "Automation",
          "mail": "automation2@localhost.local"
        }
      }' | jq -r '.user.api_key, .user.id'
    )

    automation_api_key=${automation_res[0]}
    automation_user_id=${automation_res[1]}

    # Add user_id from newly created user to Automation group
    curl -s --location --request POST "http://127.0.0.1:3000/playbook/groups/${automation_group}/users.json" \
      --user "admin:{{ admin_pass }}" \
      --header 'Content-Type: application/json' \
      --data "{
        \"user_id\" : ${automation_user_id}
      }"

    # Update the Automation API key in the secrets pillar
    so-yaml.py remove $local_salt_dir/pillar/secrets.sls secrets.playbook_automation_api_key
    printf '%s\n' \
      "  playbook_automation_api_key: $automation_api_key" >> $local_salt_dir/pillar/secrets.sls
    exit 0
  fi
  ((try_count++))
  sleep "${interval}s"
done

# Timeout exceeded, exit with non-zero exit code
exit 1
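The mapfile/jq pipeline in automation_user_create.sh assumes a Redmine users.json response shaped roughly like this (hypothetical values):

{ "user": { "id": 12, "login": "automation", "api_key": "0123456789abcdef0123456789abcdef01234567" } }

jq -r '.user.api_key, .user.id' prints the key and the id on separate lines, so automation_res[0] holds the API key and automation_res[1] the numeric user id.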
@@ -1,17 +0,0 @@
#!/bin/bash
# {%- set MYSQLPASS = salt['pillar.get']('secrets:mysql', None) -%}
# {%- set admin_pass = salt['pillar.get']('secrets:playbook_admin', None) %}
. /usr/sbin/so-common

default_salt_dir=/opt/so/saltstack/default

# Generate salt + hash for admin user
admin_salt=$(get_random_value 32)
admin_stage1_hash=$(echo -n '{{ admin_pass }}' | sha1sum | awk '{print $1}')
admin_hash=$(echo -n "${admin_salt}${admin_stage1_hash}" | sha1sum | awk '{print $1}')
sed -i "s/ADMIN_HASH/${admin_hash}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql
sed -i "s/ADMIN_SALT/${admin_salt}/g" $default_salt_dir/salt/playbook/files/playbook_db_init.sql

# Copy file to destination + execute SQL
docker cp $default_salt_dir/salt/playbook/files/playbook_db_init.sql so-mysql:/tmp/playbook_db_init.sql
docker exec so-mysql /bin/bash -c "/usr/bin/mysql -b -uroot -p{{MYSQLPASS}} < /tmp/playbook_db_init.sql"
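The two chained sha1sum calls above implement Redmine's salted password scheme, stored hash = SHA1(salt + SHA1(password)). A minimal stand-alone sketch of the same construction (hypothetical password and salt) looks like:

pass_hash=$(echo -n 'hunter2' | sha1sum | awk '{print $1}')
echo -n "3f2a1bc4${pass_hash}" | sha1sum | awk '{print $1}'   # the value substituted for ADMIN_HASH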
File diff suppressed because one or more lines are too long
@@ -1,2 +0,0 @@
{% import_yaml 'playbook/defaults.yaml' as PLAYBOOKDEFAULTS %}
{% set PLAYBOOKMERGED = salt['pillar.get']('playbook', PLAYBOOKDEFAULTS.playbook, merge=True) %}
@@ -1,4 +0,0 @@
playbook:
  enabled:
    description: You can enable or disable Playbook.
    helpLink: playbook.html
@@ -1,21 +0,0 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

append_so-playbook_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-playbook
    - unless: grep -q so-playbook /opt/so/conf/so-status/so-status.conf

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -1,14 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.



. /usr/sbin/so-common

ENABLEPLAY=${1:-False}

docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))"
@@ -1,22 +0,0 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.



. /usr/sbin/so-common

salt-call state.apply playbook.db_init,playbook queue=True

/usr/sbin/so-soctopus-restart

salt-call state.apply playbook,playbook.automation_user_create queue=True

/usr/sbin/so-soctopus-restart

echo "Importing Plays - NOTE: this will continue after installation finishes and could take an hour or more. Rebooting while the import is in progress will delay playbook imports."
sleep 5
so-playbook-ruleupdate >> /root/setup_playbook_rule_update.log 2>&1 &
@@ -1,12 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.



. /usr/sbin/so-common

docker exec so-soctopus python3 playbook_bulk-update.py
@@ -1,29 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.



. /usr/sbin/so-common

if ! [ -f /opt/so/state/playbook_regen_plays ] || [ "$1" = "--force" ]; then

  echo "Refreshing Sigma & regenerating plays... "

  # Regenerate ElastAlert & update Plays
  docker exec so-soctopus python3 playbook_play-update.py

  # Delete current Elastalert Rules
  rm /opt/so/rules/elastalert/playbook/*.yaml

  # Regenerate Elastalert Rules
  so-playbook-sync

  # Create state file
  touch /opt/so/state/playbook_regen_plays
else
  printf "\nState file found, exiting...\nRerun with --force to override.\n"
fi
@@ -1,12 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.



. /usr/sbin/so-common

/usr/sbin/so-start playbook $1
@@ -1,12 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.



. /usr/sbin/so-common

/usr/sbin/so-stop playbook $1
@@ -1,16 +0,0 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.



. /usr/sbin/so-common

# Check to see if we are already running
NUM_RUNNING=$(pgrep -cf "/bin/bash /usr/sbin/so-playbook-sync")
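# NOTE: pgrep -cf matches on the full command line, so the count includes this
# running copy of the script itself; a count greater than 1 means another sync is already active.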
[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING Playbook sync processes running...exiting." && exit 0
|
||||
|
||||
docker exec so-soctopus python3 playbook_play-sync.py
|
||||
@@ -4,9 +4,10 @@
# Elastic License 2.0.

{% from 'redis/map.jinja' import REDISMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

include:
{% if REDISMERGED.enabled %}
{% if GLOBALS.pipeline == "REDIS" and REDISMERGED.enabled %}
  - redis.enabled
{% else %}
  - redis.disabled
125  salt/salt/engines/master/pillarWatch.py  Normal file
@@ -0,0 +1,125 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# -*- coding: utf-8 -*-

import logging
import re
log = logging.getLogger(__name__)

# will need this in future versions of this engine
#import salt.client
#local = salt.client.LocalClient()

def start(fpa, interval=10):
    log.info("pillarWatch engine: ##### checking watched pillars for changes #####")

    # try to open the file that stores the previous run's data
    # if the file doesn't exist, create a blank one
    try:
        # maybe change this location
        dataFile = open("/opt/so/state/pillarWatch.txt", "r+")
    except FileNotFoundError:
        log.warn("pillarWatch engine: No previous pillarWatch data saved")
        dataFile = open("/opt/so/state/pillarWatch.txt", "w+")

    df = dataFile.read()
    for i in fpa:
        log.trace("pillarWatch engine: files: %s" % i['files'])
        log.trace("pillarWatch engine: pillar: %s" % i['pillar'])
        log.trace("pillarWatch engine: actions: %s" % i['actions'])
        pillarFiles = i['files']
        pillar = i['pillar']
        actions = i['actions']
        # these are the keys that we are going to look for as we traverse the pillarFiles
        patterns = pillar.split(".")
        # check the pillar files in reversed order to replicate the same hierarchy as the pillar top file
        for pillarFile in reversed(pillarFiles):
            currentPillarValue = ''
            previousPillarValue = ''
            # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later
            patternFound = 0
            with open(pillarFile, "r") as file:
                log.debug("pillarWatch engine: checking file: %s" % pillarFile)
                for line in file:
                    log.trace("pillarWatch engine: inspecting line: %s in file: %s" % (line, file))
                    log.trace("pillarWatch engine: looking for: %s" % patterns[patternFound])
                    # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar
                    # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look
                    # for pipeline. once pipeline is found, it will record the value
                    if re.search('^' + patterns[patternFound] + ':', line.strip()):
                        # strip the newline because it makes the logs u-g-l-y
                        log.debug("pillarWatch engine: found: %s" % line.strip('\n'))
                        patternFound += 1
                        # we have found the final key in the pillar that we are looking for, get the previous value then the current value
                        if patternFound == len(patterns):
                            # at this point, df is equal to the contents of the pillarWatch file that is used to track the previous values of the pillars
                            previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE'
                            # check the contents of the dataFile that stores the previousPillarValue(s).
                            # find if the pillar we are checking for changes has previously been saved. if so, grab its prior value
                            for l in df.splitlines():
                                if pillar in l:
                                    previousPillarValue = str(l.split(":")[1].strip())
                            currentPillarValue = str(line.split(":")[1]).strip()
                            log.debug("pillarWatch engine: %s currentPillarValue: %s" % (pillar, currentPillarValue))
                            log.debug("pillarWatch engine: %s previousPillarValue: %s" % (pillar, previousPillarValue))
                            # if the pillar we are checking for changes has been defined in the dataFile,
                            # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it.
                            if pillar in df:
                                df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df)
                            else:
                                df += pillar + ': ' + currentPillarValue + '\n'
                            log.trace("pillarWatch engine: df: %s" % df)
                            # we have found the pillar so we don't need to loop through the file anymore
                            break
            # if key and value was found in the first file, then we don't want to look in
            # any more files since we use the first file as the source of truth.
            if patternFound == len(patterns):
                break
        # if the pillar value changed, then we find what actions we should take
        log.debug("pillarWatch engine: checking if currentPillarValue != previousPillarValue")
        if currentPillarValue != previousPillarValue:
            log.info("pillarWatch engine: currentPillarValue != previousPillarValue: %s != %s" % (currentPillarValue, previousPillarValue))
            # check if the previous pillar value is defined in the pillar from -> to actions
            if previousPillarValue in actions['from']:
                # check if the new / current pillar value is defined under to
                if currentPillarValue in actions['from'][previousPillarValue]['to']:
                    ACTIONS = actions['from'][previousPillarValue]['to'][currentPillarValue]
                # if the new / current pillar value isn't defined under to, is there a wildcard defined
                elif '*' in actions['from'][previousPillarValue]['to']:
                    ACTIONS = actions['from'][previousPillarValue]['to']['*']
                # no action was defined for us to take when we see the pillar change
                else:
                    ACTIONS = ['NO DEFINED ACTION FOR US TO TAKE']
            # if the previous pillar wasn't defined in the actions from, is there a wildcard defined for the pillar that we are changing from
            elif '*' in actions['from']:
                # is the new pillar value defined for the wildcard match
                if currentPillarValue in actions['from']['*']['to']:
                    ACTIONS = actions['from']['*']['to'][currentPillarValue]
                # if the new pillar doesn't have an action, was a wildcard defined
                elif '*' in actions['from']['*']['to']:
                    # need more logic here for to and from
                    ACTIONS = actions['from']['*']['to']['*']
                else:
                    ACTIONS = ['NO DEFINED ACTION FOR US TO TAKE']
            # a match for the previous pillar wasn't defined in the action in either the form of a direct match or wildcard
            else:
                ACTIONS = ['NO DEFINED ACTION FOR US TO TAKE']
            log.debug("pillarWatch engine: all defined actions: %s" % actions['from'])
            log.debug("pillarWatch engine: ACTIONS: %s chosen based on previousPillarValue: %s switching to currentPillarValue: %s" % (ACTIONS, previousPillarValue, currentPillarValue))
            for action in ACTIONS:
                log.info("pillarWatch engine: action: %s" % action)
                if action != 'NO DEFINED ACTION FOR US TO TAKE':
                    for saltModule, args in action.items():
                        log.debug("pillarWatch engine: saltModule: %s" % saltModule)
                        log.debug("pillarWatch engine: args: %s" % args)
                        #__salt__[saltModule](**args)
                        actionReturn = __salt__[saltModule](**args)
                        log.info("pillarWatch engine: actionReturn: %s" % actionReturn)

    dataFile.seek(0)
    dataFile.write(df)
    dataFile.truncate()
    dataFile.close()
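To make the data flow above concrete (hypothetical values): after a run that saw global.pipeline set to KAFKA, /opt/so/state/pillarWatch.txt holds one line per watched pillar:

global.pipeline: KAFKA

If a later pass reads pipeline: REDIS from the highest-priority pillar file, currentPillarValue ('REDIS') no longer matches previousPillarValue ('KAFKA'), and the actions['from']['KAFKA']['to'] mapping is consulted for the commands to run.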
120  salt/salt/engines/master/valWatch.py  Normal file
@@ -0,0 +1,120 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# -*- coding: utf-8 -*-

import logging
import re
log = logging.getLogger(__name__)

# will need this in future versions of this engine
import salt.client
local = salt.client.LocalClient()

def start(fpa, interval=10):

    def getValue(content, key):
        pieces = key.split(".", 1)
        if len(pieces) > 1:
            return getValue(content[pieces[0]], pieces[1])
        else:
            #log.info("ck: %s" % content[key])
            return content[key]
            #content.pop(key, None)

    log.info("valWatch engine: ##### checking watched values for changes #####")

    # try to open the file that stores the previous run's data
    # if the file doesn't exist, create a blank one
    try:
        # maybe change this location
        dataFile = open("/opt/so/state/valWatch.txt", "r+")
    except FileNotFoundError:
        log.warn("valWatch engine: No previous valWatch data saved")
        dataFile = open("/opt/so/state/valWatch.txt", "w+")

    df = dataFile.read()
    for i in fpa:
        log.info("valWatch engine: i: %s" % i)
        log.trace("valWatch engine: map: %s" % i['map'])
        log.trace("valWatch engine: value: %s" % i['value'])
        log.trace("valWatch engine: targets: %s" % i['targets'])
        log.trace("valWatch engine: actions: %s" % i['actions'])
        mapFile = i['map']
        value = i['value']
        targets = i['targets']
        # target type
        ttype = i['ttype']
        actions = i['actions']
        # these are the keys that we are going to look for as we traverse the pillarFiles
        patterns = value.split(".")
        mainDict = patterns.pop(0)
        # patterns = value.split(".")
        for target in targets:
            # tell targets to render mapfile and return value split
            mapRender = local.cmd(target, fun='jinja.load_map', arg=[mapFile, mainDict], tgt_type=ttype)

            currentValue = ''
            previousValue = ''
            # this var is used to track how many times the pattern has been found in the pillar file so that we can access the proper index later
            patternFound = 0
            #with open(pillarFile, "r") as file:
            #    log.debug("pillarWatch engine: checking file: %s" % pillarFile)
            mapRenderKeys = list(mapRender.keys())
            if len(mapRenderKeys) > 0:
                log.info(mapRenderKeys)
                log.info("valWatch engine: mapRender: %s" % mapRender)
                minion = mapRenderKeys[0]
                currentValue = getValue(mapRender[minion], value.split('.', 1)[1])
                log.info("valWatch engine: currentValue: %s: %s: %s" % (minion, value, currentValue))
                for l in df.splitlines():
                    if value in l:
                        previousValue = str(l.split(":")[1].strip())
                log.info("valWatch engine: previousValue: %s: %s: %s" % (minion, value, previousValue))

            '''
            for key in mapRender[minion]:
                log.info("pillarWatch engine: inspecting key: %s in mainDict: %s" % (key, mainDict))
                log.info("pillarWatch engine: looking for: %s" % patterns[patternFound])
                # since we are looping line by line through a pillar file, the next line will check if each line matches the progression of keys through the pillar
                # ex. if we are looking for the value of global.pipeline, then this will loop through the pillar file until 'global' is found, then it will look
                # for pipeline. once pipeline is found, it will record the value
                #if re.search(patterns[patternFound], key):
                if patterns[patternFound] == key:
                    # strip the newline because it makes the logs u-g-l-y
                    log.info("pillarWatch engine: found: %s" % key)
                    patternFound += 1
                    # we have found the final key in the pillar that we are looking for, get the previous value then the current value
                    if patternFound == len(patterns):
                        # at this point, df is equal to the contents of the pillarWatch file that is used to track the previous values of the pillars
                        previousPillarValue = 'PREVIOUSPILLARVALUENOTSAVEDINDATAFILE'
                        # check the contents of the dataFile that stores the previousPillarValue(s).
                        # find if the pillar we are checking for changes has previously been saved. if so, grab its prior value
                        for l in df.splitlines():
                            if value in l:
                                previousPillarValue = str(l.split(":")[1].strip())
                        currentPillarValue = mapRender[minion][key]
                        log.info("pillarWatch engine: %s currentPillarValue: %s" % (value, currentPillarValue))
                        log.info("pillarWatch engine: %s previousPillarValue: %s" % (value, previousPillarValue))
                        # if the pillar we are checking for changes has been defined in the dataFile,
                        # replace the previousPillarValue with the currentPillarValue. if it isn't in there, append it.
                        if value in df:
                            df = re.sub(r"\b{}\b.*".format(pillar), pillar + ': ' + currentPillarValue, df)
                        else:
                            df += value + ': ' + currentPillarValue + '\n'
                        log.info("pillarWatch engine: df: %s" % df)
                        # we have found the pillar so we don't need to loop through the file anymore
                        break
            # if key and value was found in the first file, then we don't want to look in
            # any more files since we use the first file as the source of truth.
            if patternFound == len(patterns):
                break
            '''


    dataFile.seek(0)
    dataFile.write(df)
    dataFile.truncate()
    dataFile.close()
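A quick sanity check of the recursive getValue helper used above (hypothetical rendered map data):

# mapRender[minion] is the rendered top-level dict, e.g. {'pipeline': 'KAFKA'}
# value is 'GLOBALMERGED.pipeline', so the lookup key after the first dot is 'pipeline':
#   getValue({'pipeline': 'KAFKA'}, 'pipeline')   ->  'KAFKA'
# nested keys recurse one level per dot (which is why the recursive branch must return):
#   getValue({'a': {'b': 'c'}}, 'a.b')            ->  'c'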
141  salt/salt/engines/master/valueWatch.py  Normal file
@@ -0,0 +1,141 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# -*- coding: utf-8 -*-

import logging
import re
from pathlib import Path
import os
import glob
import json
from time import sleep

import sys

log = logging.getLogger(__name__)

import salt.client
local = salt.client.LocalClient()

def start(watched, interval=10):
    # this 20 second sleep allows enough time for the minion to reconnect during testing of the script when the salt-master is restarted
    sleep(20)
    log.info("valueWatch engine: started")
    # this dict will be used to store the files that we are watching and their modification times for the current iteration through the loop
    fileModTimesCurrent = {}
    # same as fileModTimesCurrent, but stores the previous values through the loop.
    # the combination of these two variables is used to determine if a file has changed.
    fileModTimesPrevious = {}
    #
    currentValues = {}

    def getValue(content, key):
        pieces = key.split(".", 1)
        if len(pieces) > 1:
            return getValue(content[pieces[0]], pieces[1])
        else:
            #log.info("ck: %s" % content[key])
            return content[key]
            #content.pop(key, None)

    def updateModTimesCurrent(files):
        # this dict will be used to store the files that we are watching and their modification times for the current iteration through the loop
        fileModTimesCurrent.clear()
        for f in files:
            #log.warn(f)
            fileName = Path(f).name
            filePath = Path(f).parent
            if '*' in fileName:
                #log.info(fileName)
                #log.info(filePath)
                slsFiles = glob.glob(f)
                for slsFile in slsFiles:
                    #log.info(slsFile)
                    fileModTimesCurrent.update({slsFile: os.path.getmtime(slsFile)})
            else:
                fileModTimesCurrent.update({f: os.path.getmtime(f)})

    def compareFileModTimes():
        ret = []
        for f in fileModTimesCurrent:
            log.info(f)
            if f in fileModTimesPrevious:
                log.info("valueWatch engine: fileModTimesCurrent: %s" % fileModTimesCurrent[f])
                log.info("valueWatch engine: fileModTimesPrevious: %s" % fileModTimesPrevious[f])
                if fileModTimesCurrent[f] != fileModTimesPrevious[f]:
                    log.error("valueWatch engine: fileModTimesCurrent[f] != fileModTimesPrevious[f]")
                    log.error("valueWatch engine: " + str(fileModTimesCurrent[f]) + " != " + str(fileModTimesPrevious[f]))
                    ret.append(f)
        return ret

    # this will set the current value of 'value' from engines.conf and save it to the currentValues dict
    def updateCurrentValues():
        for target in targets:
            log.info("valueWatch engine: refreshing pillars on %s" % target)
            refreshPillar = local.cmd(target, fun='saltutil.refresh_pillar', tgt_type=ttype)
            log.info("valueWatch engine: pillar refresh results: %s" % refreshPillar)
            # check if the result was True for the pillar refresh
            # will need to add a recheck in case the minion was just temporarily unavailable
            try:
                if next(iter(refreshPillar.values())):
                    sleep(5)
                    # render the map file for the variable passed in from value.
                    mapRender = local.cmd(target, fun='jinja.load_map', arg=[mapFile, mainDict], tgt_type=ttype)
                    log.info("mR: %s" % mapRender)
                    currentValue = ''
                    previousValue = ''
                    mapRenderKeys = list(mapRender.keys())
                    if len(mapRenderKeys) > 0:
                        log.info(mapRenderKeys)
                        log.info("valueWatch engine: mapRender: %s" % mapRender)
                        minion = mapRenderKeys[0]
                        # if not isinstance(mapRender[minion], bool):
                        currentValue = getValue(mapRender[minion], value.split('.', 1)[1])
                        log.info("valueWatch engine: currentValue: %s: %s: %s" % (minion, value, currentValue))
                        currentValues.update({value: {minion: currentValue}})
                        # we have rendered the value so we don't need to have any more targets render it
                        break
            except StopIteration:
                log.info("valueWatch engine: target %s did not respond or does not exist" % target)

        log.info("valueWatch engine: currentValues: %s" % currentValues)


    # run the main loop
    while True:
        log.info("valueWatch engine: checking watched files for changes")
        for v in watched:
            value = v['value']
            files = v['files']
            mapFile = v['map']
            targets = v['targets']
            ttype = v['ttype']
            actions = v['actions']

            patterns = value.split(".")
            mainDict = patterns.pop(0)

            log.info("valueWatch engine: value: %s" % value)
            # the call to this function will update fileModTimesCurrent
            updateModTimesCurrent(files)
            #log.trace("valueWatch engine: fileModTimesCurrent: %s" % fileModTimesCurrent)
            #log.trace("valueWatch engine: fileModTimesPrevious: %s" % fileModTimesPrevious)

            # compare with the previous check's file modification times
            modFilesDiff = compareFileModTimes()
            # if there were changes in the pillar files, then we need to have the minion render the map file to determine if the value changed
            if modFilesDiff:
                log.info("valueWatch engine: change in files detected, updating currentValues: %s" % modFilesDiff)
                updateCurrentValues()
            elif value not in currentValues:
                log.info("valueWatch engine: %s not in currentValues, updating currentValues." % value)
                updateCurrentValues()
            else:
                log.info("valueWatch engine: no files changed, no update for currentValues")

        # save this iteration's values to previous so we can compare next run
        fileModTimesPrevious.update(fileModTimesCurrent)
        sleep(interval)
@@ -4,3 +4,127 @@ engines_dirs:
engines:
  - checkmine:
      interval: 60
  - valueWatch:
      watched:
        - value: GLOBALMERGED.pipeline
          files:
            - /opt/so/saltstack/local/pillar/global/soc_global.sls
            - /opt/so/saltstack/local/pillar/global/adv_global.sls
          map: global/map.jinja
          targets:
            - G@role:so-notanodetype
            - G@role:so-manager
            - G@role:so-searchnode
          ttype: compound
          actions:
            from:
              '*':
                to:
                  KAFKA:
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
              KAFKA:
                to:
                  '*':
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False

#        - value: FIREWALL_MERGED
#          files:
#            - /opt/so/saltstack/local/pillar/firewall/soc_firewall.sls
#            - /opt/so/saltstack/local/pillar/firewall/adv_firewall.sls
#            - /opt/so/saltstack/local/pillar/minions/*.sls
#          map: firewall/map.jinja
#          targets:
#            - so-*
#          ttype: compound
#          actions:
#            from:
#              '*':
#                to:
#                  '*':
#                    - cmd.run:
#                        cmd: date
      interval: 10


  - pillarWatch:
      fpa:
        # these files will be checked in reversed order to replicate the same hierarchy as the pillar top file
        - files:
            - /opt/so/saltstack/local/pillar/global/soc_global.sls
            - /opt/so/saltstack/local/pillar/global/adv_global.sls
          pillar: global.pipeline
          actions:
            from:
              '*':
                to:
                  KAFKA:
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
#                    - cmd.run:
#                        cmd: salt-call saltutil.kill_all_jobs
#                    - cmd.run:
#                        cmd: salt-call state.highstate &
              KAFKA:
                to:
                  '*':
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
#                    - cmd.run:
#                        cmd: salt-call saltutil.kill_all_jobs
#                    - cmd.run:
#                        cmd: salt-call state.highstate &
        - files:
            - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
            - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
          pillar: idstools.config.ruleset
          actions:
            from:
              '*':
                to:
                  '*':
                    - cmd.run:
                        cmd: /usr/sbin/so-rule-update
      interval: 10
  - valWatch:
      fpa:
        - value: GLOBALMERGED.pipeline
          map: global/map.jinja
          targets:
            - so-manager
            - so-managersearch
            - so-standalone

          actions:
            from:
              '*':
                to:
                  KAFKA:
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
#                    - cmd.run:
#                        cmd: salt-call saltutil.kill_all_jobs
#                    - cmd.run:
#                        cmd: salt-call state.highstate &
              KAFKA:
                to:
                  '*':
                    - cmd.run:
                        cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
#                    - cmd.run:
#                        cmd: salt-call saltutil.kill_all_jobs
#                    - cmd.run:
#                        cmd: salt-call state.highstate &
#        - files:
#            - /opt/so/saltstack/local/pillar/idstools/soc_idstools.sls
#            - /opt/so/saltstack/local/pillar/idstools/adv_idstools.sls
#          pillar: idstools.config.ruleset
#          actions:
#            from:
#              '*':
#                to:
#                  '*':
#                    - cmd.run:
#                        cmd: /usr/sbin/so-rule-update
      interval: 10
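As a concrete trace of the pillarWatch entry above: if an operator changes global.pipeline in soc_global.sls from, say, REDIS to KAFKA, the engine's next pass (every 10 seconds) matches the from: '*' / to: KAFKA branch and runs:

/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True

Changing it from KAFKA back to any other value matches from: KAFKA / to: '*' and flips kafka.enabled back to False.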
@@ -27,6 +27,11 @@ checkmine_engine:
    - source: salt://salt/engines/master/checkmine.py
    - makedirs: True

pillarWatch_engine:
  file.managed:
    - name: /etc/salt/engines/pillarWatch.py
    - source: salt://salt/engines/master/pillarWatch.py

engines_config:
  file.managed:
    - name: /etc/salt/master.d/engines.conf
@@ -13,6 +13,9 @@ include:
  - systemd.reload
  - repo.client
  - salt.mine_functions
{% if GLOBALS.role in GLOBALS.manager_roles %}
  - ca
{% endif %}

{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}

@@ -98,5 +101,8 @@ salt_minion_service:
      - file: mine_functions
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
      - file: set_log_levels
{% endif %}
{% if GLOBALS.role in GLOBALS.manager_roles %}
      - file: /etc/salt/minion.d/signing_policies.conf
{% endif %}
    - order: last
@@ -9,7 +9,14 @@
include:
  - manager.sync_es_users

socdirtest:
sigmarepodir:
  file.directory:
    - name: /opt/so/conf/sigma/repos
    - user: 939
    - group: 939
    - makedirs: True

socdirelastaertrules:
  file.directory:
    - name: /opt/so/rules/elastalert/rules
    - user: 939
@@ -45,6 +52,15 @@ socsaltdir:
    - mode: 770
    - makedirs: True

socanalytics:
  file.managed:
    - name: /opt/so/conf/soc/analytics.js
    - source: salt://soc/files/soc/analytics.js
    - user: 939
    - group: 939
    - mode: 600
    - show_changes: False

socconfig:
  file.managed:
    - name: /opt/so/conf/soc/soc.json
File diff suppressed because it is too large
@@ -8,6 +8,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'soc/merged.map.jinja' import DOCKER_EXTRA_HOSTS %}
{% from 'soc/merged.map.jinja' import SOCMERGED %}

include:
  - soc.config
@@ -24,12 +25,16 @@ so-soc:
    - binds:
      - /nsm/rules:/nsm/rules:rw
      - /opt/so/conf/strelka:/opt/sensoroni/yara:rw
      - /opt/so/conf/sigma:/opt/sensoroni/sigma:rw
      - /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw
      - /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw
      - /nsm/soc/jobs:/opt/sensoroni/jobs:rw
      - /nsm/soc/uploads:/nsm/soc/uploads:rw
      - /opt/so/log/soc/:/opt/sensoroni/logs/:rw
      - /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro
{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %}
      - /opt/so/conf/soc/analytics.js:/opt/sensoroni/html/js/analytics.js:ro
{% endif %}
      - /opt/so/conf/soc/motd.md:/opt/sensoroni/html/motd.md:ro
      - /opt/so/conf/soc/banner.md:/opt/sensoroni/html/login/banner.md:ro
      - /opt/so/conf/soc/sigma_so_pipeline.yaml:/opt/sensoroni/sigma_so_pipeline.yaml:ro
@@ -66,6 +71,7 @@ so-soc:
      - file: socdatadir
      - file: soclogdir
      - file: socconfig
      - file: socanalytics
      - file: socmotd
      - file: socbanner
      - file: soccustom
5  salt/soc/files/soc/analytics.js  Normal file
@@ -0,0 +1,5 @@
(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-TM46SL7T');
@@ -12,6 +12,10 @@ To see all the latest features and fixes in this version of Security Onion, clic

Want the best hardware for your enterprise deployment? Check out our [enterprise appliances](https://securityonionsolutions.com/hardware/)!

## Premium Support

Experiencing difficulties and need priority support or remote assistance? We offer a [premium support plan](https://securityonionsolutions.com/support/) to assist corporate, educational, and government organizations.

## Customize This Space

Make this area your own by customizing the content in the [Config](/#/config?s=soc.files.soc.motd__md) interface.
@@ -79,3 +79,12 @@ transformations:
      - type: logsource
        product: windows
        category: driver_load
  - id: linux_security_add-fields
    type: add_condition
    conditions:
      event.module: 'system'
      event.dataset: 'system.auth'
    rule_conditions:
      - type: logsource
        product: linux
        service: auth
@@ -30,6 +30,11 @@
{# since cases is not a valid soc config item and only used for the map files, remove it from being placed in the config #}
{% do SOCMERGED.config.server.modules.pop('cases') %}

{# do not automatically enable Sigma rules if install is Eval or Import #}
{% if grains['role'] in ['so-eval', 'so-import'] %}
{% do SOCMERGED.config.server.modules.elastalertengine.update({'autoEnabledSigmaRules': []}) %}
{% endif %}

{# remove these modules if detections is disabled #}
{% if not SOCMERGED.config.server.client.detectionsEnabled %}
{% do SOCMERGED.config.server.modules.pop('elastalertengine') %}
@@ -41,10 +46,6 @@
{% do SOCMERGED.config.server.modules.strelkaengine.update({'autoUpdateEnabled': false}) %}
{% endif %}

{% if pillar.manager.playbook == 0 %}
{% do SOCMERGED.config.server.client.inactiveTools.append('toolPlaybook') %}
{% endif %}

{% set standard_actions = SOCMERGED.config.pop('actions') %}

{% if pillar.global.endgamehost != '' %}
Some files were not shown because too many files have changed in this diff