Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)

Compare commits: 2.4.80-20240624 ... 2.4.100-20240903 (277 commits)
The compare listing contains 277 commits, identified only by abbreviated SHA1 (5472d2586c first, 2168698595 last); the Author, Date, and commit message columns were not captured in this snapshot.
@@ -1,17 +1,17 @@
### 2.4.80-20240624 ISO image released on 2024/06/25
### 2.4.100-20240903 ISO image released on 2024/09/03

### Download and Verify

2.4.80-20240624 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.80-20240624.iso
2.4.100-20240903 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.100-20240903.iso

MD5: 139F9762E926F9CB3C4A9528A3752C31
SHA1: BC6CA2C5F4ABC1A04E83A5CF8FFA6A53B1583CC9
SHA256: 70E90845C84FFA30AD6CF21504634F57C273E7996CA72F7250428DDBAAC5B1BD
MD5: 856BBB4F0764C0A479D8949725FC096B
SHA1: B3FCFB8F1031EB8AA833A90C6C5BB61328A73842
SHA256: 0103EB9D78970396BB47CBD18DA1FFE64524F5C1C559487A1B2D293E1882B265

Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.80-20240624.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.100-20240903.iso.sig

Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.

Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.80-20240624.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.100-20240903.iso.sig
```

Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.80-20240624.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.100-20240903.iso
```

Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.80-20240624.iso.sig securityonion-2.4.80-20240624.iso
gpg --verify securityonion-2.4.100-20240903.iso.sig securityonion-2.4.100-20240903.iso
```
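Before `gpg --verify` can succeed, the signing key from the KEYS file referenced above must already be in the local keyring. The full README covers that step before this hunk; for reference, a typical import (assuming the KEYS file was saved to the current directory) looks like:
```
# Import the Security Onion signing key downloaded from the KEYS URL above
gpg --import KEYS
```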

The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Mon 24 Jun 2024 02:42:03 PM EDT using RSA key ID FE507013
gpg: Signature made Sat 31 Aug 2024 05:05:05 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
@@ -5,9 +5,11 @@
| Version | Supported |
| ------- | ------------------ |
| 2.4.x | :white_check_mark: |
| 2.3.x | :white_check_mark: |
| 2.3.x | :x: |
| 16.04.x | :x: |

Security Onion 2.3 has reached End Of Life and is no longer supported.

Security Onion 16.04 has reached End Of Life and is no longer supported.

## Reporting a Vulnerability
pillar/elasticsearch/nodes.sls (new file, 34 lines)
@@ -0,0 +1,34 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
     'mine.get',
     tgt='elasticsearch:enabled:true',
     fun='network.ip_addrs',
     tgt_type='pillar') | dictsort()
%}

# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0 %}
  {% set hostname = minionid.split('_') | first %}
  {% set node_type = minionid.split('_') | last %}
  {% if node_type not in node_types.keys() %}
    {% do node_types.update({node_type: {hostname: ip[0]}}) %}
  {% else %}
    {% if hostname not in node_types[node_type] %}
      {% do node_types[node_type].update({hostname: ip[0]}) %}
    {% else %}
      {% do node_types[node_type][hostname].update(ip[0]) %}
    {% endif %}
  {% endif %}
{% endif %}
{% endfor %}


elasticsearch:
  nodes:
{% for node_type, values in node_types.items() %}
    {{node_type}}:
{% for hostname, ip in values.items() %}
      {{hostname}}:
        ip: {{ip}}
{% endfor %}
{% endfor %}
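For reference, a sketch of what this template renders to on a hypothetical grid with a manager and one search node (the hostnames and IPs below are illustrative, not taken from the source):
```
# Hypothetical rendered output of pillar/elasticsearch/nodes.sls
elasticsearch:
  nodes:
    manager:
      so-manager:
        ip: 10.0.0.10
    searchnode:
      so-search01:
        ip: 10.0.0.21
```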
@@ -1,16 +1,15 @@
{% set node_types = {} %}
{% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %}
{% for minionid, ip in salt.saltutil.runner(
     'mine.get',
     tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ',
     tgt='logstash:enabled:true',
     fun='network.ip_addrs',
     tgt_type='compound') | dictsort()
     tgt_type='pillar') | dictsort()
%}

# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0 %}
  {% set hostname = cached_grains[minionid]['host'] %}
  {% set node_type = minionid.split('_')[1] %}
  {% set hostname = minionid.split('_') | first %}
  {% set node_type = minionid.split('_') | last %}
  {% if node_type not in node_types.keys() %}
    {% do node_types.update({node_type: {hostname: ip[0]}}) %}
  {% else %}
pillar/redis/nodes.sls (new file, 34 lines)
@@ -0,0 +1,34 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
     'mine.get',
     tgt='redis:enabled:true',
     fun='network.ip_addrs',
     tgt_type='pillar') | dictsort()
%}

# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0 %}
  {% set hostname = minionid.split('_') | first %}
  {% set node_type = minionid.split('_') | last %}
  {% if node_type not in node_types.keys() %}
    {% do node_types.update({node_type: {hostname: ip[0]}}) %}
  {% else %}
    {% if hostname not in node_types[node_type] %}
      {% do node_types[node_type].update({hostname: ip[0]}) %}
    {% else %}
      {% do node_types[node_type][hostname].update(ip[0]) %}
    {% endif %}
  {% endif %}
{% endif %}
{% endfor %}


redis:
  nodes:
{% for node_type, values in node_types.items() %}
    {{node_type}}:
{% for hostname, ip in values.items() %}
      {{hostname}}:
        ip: {{ip}}
{% endfor %}
{% endfor %}
@@ -47,10 +47,12 @@ base:
    - kibana.adv_kibana
    - kratos.soc_kratos
    - kratos.adv_kratos
    - redis.nodes
    - redis.soc_redis
    - redis.adv_redis
    - influxdb.soc_influxdb
    - influxdb.adv_influxdb
    - elasticsearch.nodes
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    - elasticfleet.soc_elasticfleet
@@ -147,10 +149,12 @@ base:
    - idstools.adv_idstools
    - kratos.soc_kratos
    - kratos.adv_kratos
    - redis.nodes
    - redis.soc_redis
    - redis.adv_redis
    - influxdb.soc_influxdb
    - influxdb.adv_influxdb
    - elasticsearch.nodes
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    - elasticfleet.soc_elasticfleet
@@ -215,11 +219,13 @@ base:
    - logstash.nodes
    - logstash.soc_logstash
    - logstash.adv_logstash
    - elasticsearch.nodes
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
    - elasticsearch.auth
    {% endif %}
    - redis.nodes
    - redis.soc_redis
    - redis.adv_redis
    - minions.{{ grains.id }}
@@ -227,6 +233,8 @@ base:
    - stig.soc_stig
    - soc.license
    - kafka.nodes
    - kafka.soc_kafka
    - kafka.adv_kafka

  '*_receiver':
    - logstash.nodes
@@ -136,7 +136,9 @@
      'firewall',
      'schedule',
      'docker_clean',
      'stig'
      'stig',
      'kafka.ca',
      'kafka.ssl'
    ],
    'so-standalone': [
      'salt.master',
@@ -195,7 +197,6 @@
      'schedule',
      'docker_clean',
      'kafka',
      'elasticsearch.ca',
      'stig'
    ],
    'so-desktop': [
@@ -14,6 +14,11 @@ net.core.wmem_default:
  sysctl.present:
    - value: 26214400

# Users are not a fan of console messages
kernel.printk:
  sysctl.present:
    - value: "3 4 1 3"

# Remove variables.txt from /tmp - This is temp
rmvariablesfile:
  file.absent:
@@ -8,7 +8,7 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file.
ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_TARBALL_VERSION="8.14.3"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
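A minimal sketch of how these variables could be used to fetch and verify the pinned agent tarball; the actual soup/install logic is not part of this hunk, so the md5 comparison below is an assumption:
```
# Illustrative only: download the pinned Elastic Agent tarball and check it
# against the published MD5 before placing it at $ELASTIC_AGENT_FILE.
curl -fsSL -o /tmp/elastic-agent.tar.gz "$ELASTIC_AGENT_URL"
expected_md5=$(curl -fsSL "$ELASTIC_AGENT_MD5_URL")
actual_md5=$(md5sum /tmp/elastic-agent.tar.gz | awk '{print $1}')
[ "$expected_md5" = "$actual_md5" ] && mv /tmp/elastic-agent.tar.gz "$ELASTIC_AGENT_FILE"
```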
@@ -95,6 +95,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in start_workers" # server not yet ready (logstash waiting on elastic)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in buffer_initialize" # server not yet ready (logstash waiting on elastic)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready
@@ -147,6 +149,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncing rule" # false positive (rule sync log line includes rule name which can contain 'error')
fi

if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -170,6 +173,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cannot join on an empty table" # InfluxDB flux query, import nodes
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exhausting result iterator" # InfluxDB flux query mismatched table results (temporary data issue)
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to finish run" # InfluxDB rare error, self-recoverable
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to gather disk name" # InfluxDB known error, can't read disks because the container doesn't have them mounted
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"
@@ -205,6 +209,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
fi

RESULT=0
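These hunks only extend the exclusion pattern; as a sketch of how such a pipe-delimited pattern is typically applied while scanning a log (the exact invocation used by so-log-check is not shown in this diff), the filtering step looks roughly like:
```
# Illustrative only: flag lines that look like errors and are not excluded
grep -iE "error" "$log_file" | grep -viE "$EXCLUDED_ERRORS" && RESULT=1
```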
@@ -241,6 +246,7 @@ exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on
exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable
exclude_log "/nsm/kafka/data/" # ignore Kafka data directory from log check.

for log_file in $(cat /tmp/log_check_files); do
  status "Checking log file $log_file"
@@ -9,6 +9,9 @@

. /usr/sbin/so-common

software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")

{%- if salt['grains.get']('sosmodel', '') %}
{%- set model = salt['grains.get']('sosmodel') %}
model={{ model }}
@@ -16,33 +19,42 @@ model={{ model }}
if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then
  exit 0
fi

for i in "${software_raid[@]}"; do
  if [[ "$model" == $i ]]; then
    is_softwareraid=true
    is_hwraid=false
    break
  fi
done

for i in "${hardware_raid[@]}"; do
  if [[ "$model" == $i ]]; then
    is_softwareraid=false
    is_hwraid=true
    break
  fi
done

{%- else %}
echo "This is not an appliance"
exit 0
{%- endif %}
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200|SOSSNNV|SOSMN)$ ]]; then
  is_bossraid=true
fi
if [[ $model =~ ^(SOSSNNV|SOSMN)$ ]]; then
  is_swraid=true
fi
if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200)$ ]]; then
  is_hwraid=true
fi

check_nsm_raid() {
  PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
  MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)

  if [[ $APPLIANCE == '1' ]]; then
    if [[ "$model" == "SOS500" || "$model" == "SOS500-DE02" ]]; then
      # This doesn't have raid
      HWRAID=0
    else
      if [[ -n $PERCCLI ]]; then
        HWRAID=0
      elif [[ -n $MEGACTL ]]; then
        HWRAID=0
      else
        HWRAID=1
      fi

    fi
  fi

}
@@ -50,17 +62,27 @@ check_nsm_raid() {
check_boss_raid() {
  MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)
  MVTEST=$(/usr/local/bin/mvcli info -o vd | grep "No adapter")
  BOSSNVMECLI=$(/usr/local/bin/mnv_cli info -o vd -i 0 | grep Functional)

  # Check to see if this is a SM based system
  if [[ -z $MVTEST ]]; then
    if [[ -n $MVCLI ]]; then
  # Is this NVMe Boss Raid?
  if [[ "$model" =~ "-DE02" ]]; then
    if [[ -n $BOSSNVMECLI ]]; then
      BOSSRAID=0
    else
      BOSSRAID=1
    fi
  else
    # This doesn't have boss raid so let's make it 0
    BOSSRAID=0
    # Check to see if this is a SM based system
    if [[ -z $MVTEST ]]; then
      if [[ -n $MVCLI ]]; then
        BOSSRAID=0
      else
        BOSSRAID=1
      fi
    else
      # This doesn't have boss raid so let's make it 0
      BOSSRAID=0
    fi
  fi
}
@@ -79,14 +101,13 @@ SWRAID=0
BOSSRAID=0
HWRAID=0

if [[ $is_hwraid ]]; then
if [[ "$is_hwraid" == "true" ]]; then
  check_nsm_raid
  check_boss_raid
fi
if [[ $is_bossraid ]]; then
  check_boss_raid
fi
if [[ $is_swraid ]]; then
if [[ "$is_softwareraid" == "true" ]]; then
  check_software_raid
  check_boss_raid
fi

sum=$(($SWRAID + $BOSSRAID + $HWRAID))
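The change from `[[ $is_hwraid ]]` to `[[ "$is_hwraid" == "true" ]]` matters because any non-empty string, including "false", is truthy to `[[ ... ]]`. A quick illustration of the pitfall:
```
is_hwraid=false
[[ $is_hwraid ]] && echo "runs even though the value is 'false'"
[[ "$is_hwraid" == "true" ]] || echo "string comparison behaves as intended"
```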
@@ -3,8 +3,8 @@ elastalert:
    description: You can enable or disable Elastalert.
    helpLink: elastalert.html
  alerter_parameters:
    title: Alerter Parameters
    description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
    title: Custom Configuration Parameters
    description: Optional configuration parameters made available as defaults for all rules and alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available configuration parameters. Requires a valid Security Onion license key.
    global: True
    multiline: True
    syntax: yaml
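As an illustration of the YAML this setting expects, a hypothetical ElastAlert 2 alerter configuration (placeholder values; consult the ElastAlert 2 documentation for the full option list) might look like:
```
alert:
  - slack
slack_webhook_url: "https://hooks.slack.com/services/EXAMPLE/EXAMPLE/EXAMPLE"
slack_channel_override: "#detections"
```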
@@ -97,6 +97,7 @@ elasticfleet:
      - symantec_endpoint
      - system
      - tcp
      - tenable_io
      - tenable_sc
      - ti_abusech
      - ti_anomali
@@ -0,0 +1,19 @@
{
  "package": {
    "name": "fleet_server",
    "version": ""
  },
  "name": "fleet_server-1",
  "namespace": "default",
  "policy_id": "FleetServer_hostname",
  "vars": {},
  "inputs": {
    "fleet_server-fleet-server": {
      "enabled": true,
      "vars": {
        "custom": "server.ssl.supported_protocols: [\"TLSv1.2\", \"TLSv1.3\"]\nserver.ssl.cipher_suites: [ \"ECDHE-RSA-AES-128-GCM-SHA256\", \"ECDHE-RSA-AES-256-GCM-SHA384\", \"ECDHE-RSA-AES-128-CBC-SHA\", \"ECDHE-RSA-AES-256-CBC-SHA\", \"RSA-AES-128-GCM-SHA256\", \"RSA-AES-256-GCM-SHA384\"]"
      },
      "streams": {}
    }
  }
}
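This integration template is fed to the Fleet API after its `policy_id` and `name` placeholders are rewritten (see the script further down that calls `elastic_fleet_integration_update`). As a rough sketch, posting such a package policy by hand would look something like the following; the exact wrapping done by the helper functions is an assumption here:
```
# Illustrative only: create a package policy from an already-rewritten JSON file
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d @/opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json
```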
@@ -5,7 +5,7 @@
  "package": {
    "name": "endpoint",
    "title": "Elastic Defend",
    "version": "8.10.2"
    "version": "8.14.0"
  },
  "enabled": true,
  "policy_id": "endpoints-initial",
@@ -11,7 +11,7 @@
  "winlogs-winlog": {
    "enabled": true,
    "streams": {
      "winlog.winlog": {
      "winlog.winlogs": {
        "enabled": true,
        "vars": {
          "channel": "Microsoft-Windows-Windows Defender/Operational",
@@ -20,7 +20,7 @@
],
"data_stream.dataset": "import",
"custom": "",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.43.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-1.38.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.43.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.43.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-1.38.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.59.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-1.45.1\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.59.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.59.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-1.45.1\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [
"import"
]
@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-elastic-fleet-common

# Get all the fleet policies
json_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true')

# Extract the IDs that start with "FleetServer_"
POLICY=$(echo "$json_output" | jq -r '.items[] | select(.id | startswith("FleetServer_")) | .id')

# Iterate over each ID in the POLICY variable
for POLICYNAME in $POLICY; do
    printf "\nUpdating Policy: $POLICYNAME\n"

    # First get the Integration ID
    INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$POLICYNAME" | jq -r '.item.package_policies[] | select(.package.name == "fleet_server") | .id')

    # Modify the default integration policy to update the policy_id and name with the correct naming
    UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "$POLICYNAME" --arg name "fleet_server-$POLICYNAME" '
    .policy_id = $policy_id |
    .name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)

    # Now update the integration policy using the modified JSON
    elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
done
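For a hypothetical policy ID of `FleetServer_manager01`, the jq filter above leaves the template untouched except for two fields, which end up as:
```
{
  "policy_id": "FleetServer_manager01",
  "name": "fleet_server-FleetServer_manager01"
}
```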
@@ -12,7 +12,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
  # First, check for any package upgrades
  /usr/sbin/so-elastic-fleet-package-upgrade

  # Second, configure Elastic Defend Integration separately
  # Second, update Fleet Server policies
  /sbin/so-elastic-fleet-integration-policy-elastic-fleet-server

  # Third, configure Elastic Defend Integration separately
  /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend

  # Initial Endpoints
@@ -53,7 +53,8 @@ fi
printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)

### Create Outputs & Fleet URLs ###
### Create Outputs, Fleet Policy and Fleet URLs ###
# Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in $INTCA)
JSON_STRING=$( jq -n \
@@ -62,7 +63,21 @@ JSON_STRING=$( jq -n \
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
printf "\n\n"

printf "\nCreate Logstash Output Config if node is not an Import or Eval install\n"
# Create the Manager Fleet Server Host Agent Policy
# This has to be done while the Elasticsearch Output is set to the default Output
printf "Create Manager Fleet Server Policy...\n"
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"

# Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
.policy_id = $policy_id |
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)

# Add the Fleet Server Integration to the new Fleet Policy
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"

# Now we can create the Logstash Output and set it to be the default Output
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
@@ -101,16 +116,6 @@ printf "\n\n"
# Load Elasticsearch templates
/usr/sbin/so-elasticsearch-templates-load

# Manager Fleet Server Host
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "true" "120"

#Temp Fixup for ES Output bug
JSON_STRING=$( jq -n \
  --arg NAME "FleetServer_{{ GLOBALS.hostname }}" \
  '{"name": $NAME,"description": $NAME,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":120,"data_output_id":"so-manager_elasticsearch"}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_{{ GLOBALS.hostname }}" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"

# Initial Endpoints Policy
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
@@ -165,4 +170,4 @@ salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers
salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0
exit 0
@@ -1,23 +1,37 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0. #}

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS with context %}

{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}

{# ES_LOGSTASH_NODES is the same as LOGSTASH_NODES from logstash/map.jinja but heavynodes and fleet nodes are removed #}
{% set ES_LOGSTASH_NODES = [] %}
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{# this is a list of dicts containing hostname:ip for elasticsearch nodes that need to know about each other for cluster #}
{% set ELASTICSEARCH_SEED_HOSTS = [] %}
{% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() | sort %}
{% if node_type not in ['heavynode', 'fleet'] %}
{% if node_type != 'heavynode' %}
{% for hostname in node_data[node_type].keys() %}
{% do ES_LOGSTASH_NODES.append({hostname:node_details[hostname].ip}) %}
{% do ELASTICSEARCH_SEED_HOSTS.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endif %}
{% endfor %}

{# this is a list of dicts containing hostname:ip of all nodes running elasticsearch #}
{% set ELASTICSEARCH_NODES = [] %}
{% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() %}
{% for hostname in node_data[node_type].keys() %}
{% do ELASTICSEARCH_NODES.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endfor %}

{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
{% if ES_LOGSTASH_NODES | length > 1 %}
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
{% for NODE in ES_LOGSTASH_NODES %}
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %}
{% endfor %}
{% endif %}
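On a grid where the elasticsearch:nodes pillar contains more than one eligible node (for example the hypothetical manager plus search node sketched earlier), ELASTICSEARCH_SEED_HOSTS ends up holding one {hostname: ip} dict per non-heavynode, and the rendered Elasticsearch config would pick up a discovery stanza along these lines (hostnames illustrative):
```
discovery:
  seed_hosts:
    - so-manager
    - so-search01
```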
@@ -118,6 +118,11 @@ esingestconf:
    - user: 930
    - group: 939

# Remove .fleet_final_pipeline-1 because we are using global@custom now
so-fleet-final-pipeline-remove:
  file.absent:
    - name: /opt/so/conf/elasticsearch/ingest/.fleet_final_pipeline-1

# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:
File diff suppressed because it is too large
@@ -7,8 +7,8 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'logstash/map.jinja' import LOGSTASH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ES_LOGSTASH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
@@ -27,7 +27,7 @@ so-elasticsearch:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
    - extra_hosts:
      {% for node in LOGSTASH_NODES %}
      {% for node in ELASTICSEARCH_NODES %}
      {% for hostname, ip in node.items() %}
        - {{hostname}}:{{ip}}
      {% endfor %}
@@ -38,7 +38,7 @@ so-elasticsearch:
      {% endfor %}
      {% endif %}
    - environment:
      {% if ES_LOGSTASH_NODES | length == 1 or GLOBALS.role == 'so-heavynode' %}
      {% if ELASTICSEARCH_SEED_HOSTS | length == 1 or GLOBALS.role == 'so-heavynode' %}
      - discovery.type=single-node
      {% endif %}
      - ES_JAVA_OPTS=-Xms{{ GLOBALS.elasticsearch.es_heap }} -Xmx{{ GLOBALS.elasticsearch.es_heap }} -Des.transport.cname_in_publish_address=true -Dlog4j2.formatMsgNoLookups=true
@@ -62,6 +62,7 @@
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
{%- endraw %}
{%- if HIGHLANDER %}
@@ -72,7 +73,9 @@
  }
}
{%- endif %}
{%- raw %}
{%- raw %}
,
{ "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } }
]
}
{% endraw %}
@@ -1,107 +0,0 @@
{
  "version": 3,
  "_meta": {
    "managed_by": "fleet",
    "managed": true
  },
  "description": "Final pipeline for processing all incoming Fleet Agent documents. \n",
  "processors": [
    {
      "date": {
        "description": "Add time when event was ingested (and remove sub-seconds to improve storage efficiency)",
        "tag": "truncate-subseconds-event-ingested",
        "field": "_ingest.timestamp",
        "target_field": "event.ingested",
        "formats": [ "ISO8601" ],
        "output_format": "date_time_no_millis",
        "ignore_failure": true
      }
    },
    {
      "remove": {
        "description": "Remove any pre-existing untrusted values.",
        "field": [ "event.agent_id_status", "_security" ],
        "ignore_missing": true
      }
    },
    {
      "set_security_user": {
        "field": "_security",
        "properties": [ "authentication_type", "username", "realm", "api_key" ]
      }
    },
    {
      "script": {
        "description": "Add event.agent_id_status based on the API key metadata and the agent.id contained in the event.\n",
        "tag": "agent-id-status",
"source": "boolean is_user_trusted(def ctx, def users) {\n if (ctx?._security?.username == null) {\n return false;\n }\n\n def user = null;\n for (def item : users) {\n if (item?.username == ctx._security.username) {\n user = item;\n break;\n }\n }\n\n if (user == null || user?.realm == null || ctx?._security?.realm?.name == null) {\n return false;\n }\n\n if (ctx._security.realm.name != user.realm) {\n return false;\n }\n\n return true;\n}\n\nString verified(def ctx, def params) {\n // No agent.id field to validate.\n if (ctx?.agent?.id == null) {\n return \"missing\";\n }\n\n // Check auth metadata from API key.\n if (ctx?._security?.authentication_type == null\n // Agents only use API keys.\n || ctx._security.authentication_type != 'API_KEY'\n // Verify the API key owner before trusting any metadata it contains.\n || !is_user_trusted(ctx, params.trusted_users)\n // Verify the API key has metadata indicating the assigned agent ID.\n || ctx?._security?.api_key?.metadata?.agent_id == null) {\n return \"auth_metadata_missing\";\n }\n\n // The API key can only be used represent the agent.id it was issued to.\n if (ctx._security.api_key.metadata.agent_id != ctx.agent.id) {\n // Potential masquerade attempt.\n return \"mismatch\";\n }\n\n return \"verified\";\n}\n\nif (ctx?.event == null) {\n ctx.event = [:];\n}\n\nctx.event.agent_id_status = verified(ctx, params);",
        "params": {
          "trusted_users": [
            { "username": "elastic/fleet-server", "realm": "_service_account" },
            { "username": "cloud-internal-agent-server", "realm": "found" },
            { "username": "elastic", "realm": "reserved" }
          ]
        }
      }
    },
    {
      "remove": {
        "field": "_security",
        "ignore_missing": true
      }
    },
    { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
    { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
    { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
    { "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
    { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}" } },
    { "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
    { "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
    { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
    { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
    { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
    { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
    { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
    { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
    { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
    { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
  ],
  "on_failure": [
    {
      "remove": {
        "field": "_security",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "append": {
        "field": "error.message",
        "value": [
          "failed in Fleet agent final_pipeline: {{ _ingest.on_failure_message }}"
        ]
      }
    }
  ]
}
salt/elasticsearch/files/ingest/global@custom (new file, 27 lines)
@@ -0,0 +1,27 @@
{
  "version": 3,
  "_meta": {
    "managed_by": "securityonion",
    "managed": true
  },
  "description": "Custom pipeline for processing all incoming Fleet Agent documents. \n",
  "processors": [
    { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
    { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
    { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
    { "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
    { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}" } },
    { "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
    { "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
    { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
    { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
    { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
    { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
    { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
    { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
    { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
    { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
  ]
}
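A quick way to sanity-check this pipeline once it has been loaded is the Elasticsearch simulate API; a sketch (credentials, scheme, and port are assumptions that depend on the deployment) is:
```
# Illustrative only: run one fake document through global@custom and inspect the result
curl -s -k -u elastic:PASSWORD -X POST "https://localhost:9200/_ingest/pipeline/global@custom/_simulate" \
  -H 'Content-Type: application/json' \
  -d '{"docs":[{"_source":{"event":{"dataset":"system.syslog"},"network":{"direction":"egress"}}}]}'
```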
@@ -466,6 +466,13 @@ elasticsearch:
        so-logs-sonicwall_firewall_x_log: *indexSettings
        so-logs-snort_x_log: *indexSettings
        so-logs-symantec_endpoint_x_log: *indexSettings
        so-logs-tenable_io_x_asset: *indexSettings
        so-logs-tenable_io_x_plugin: *indexSettings
        so-logs-tenable_io_x_scan: *indexSettings
        so-logs-tenable_io_x_vulnerability: *indexSettings
        so-logs-tenable_sc_x_asset: *indexSettings
        so-logs-tenable_sc_x_plugin: *indexSettings
        so-logs-tenable_sc_x_vulnerability: *indexSettings
        so-logs-ti_abusech_x_malware: *indexSettings
        so-logs-ti_abusech_x_malwarebazaar: *indexSettings
        so-logs-ti_abusech_x_threatfox: *indexSettings
@@ -530,6 +537,58 @@ elasticsearch:
    so-strelka: *indexSettings
    so-syslog: *indexSettings
    so-zeek: *indexSettings
    so-metrics-fleet_server_x_agent_status: &fleetMetricsSettings
      index_sorting:
        description: Sorts the index by event time, at the cost of additional processing resource consumption.
        advanced: True
        readonly: True
        helpLink: elasticsearch.html
      index_template:
        ignore_missing_component_templates:
          description: Ignore component templates if they aren't in Elasticsearch.
          advanced: True
          readonly: True
          helpLink: elasticsearch.html
        index_patterns:
          description: Patterns for matching multiple indices or tables.
          advanced: True
          readonly: True
          helpLink: elasticsearch.html
        template:
          settings:
            index:
              mode:
                description: Type of mode used for this index. Time series indices can be used for metrics to reduce necessary storage.
                advanced: True
                readonly: True
                helpLink: elasticsearch.html
              number_of_replicas:
                description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
                advanced: True
                readonly: True
                helpLink: elasticsearch.html
        composed_of:
          description: The index template is composed of these component templates.
          advanced: True
          readonly: True
          helpLink: elasticsearch.html
        priority:
          description: The priority of the index template.
          advanced: True
          readonly: True
          helpLink: elasticsearch.html
        data_stream:
          hidden:
            description: Hide the data stream.
            advanced: True
            readonly: True
            helpLink: elasticsearch.html
          allow_custom_routing:
            description: Allow custom routing for the data stream.
            advanced: True
            readonly: True
            helpLink: elasticsearch.html
    so-metrics-fleet_server_x_agent_versions: *fleetMetricsSettings
  so_roles:
    so-manager: &soroleSettings
      config:
@@ -1,3 +1,8 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0. #}

{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
{% set DEFAULT_GLOBAL_OVERRIDES = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings.pop('global_overrides') %}
@@ -17,10 +22,26 @@
{% set ES_INDEX_SETTINGS = {} %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
{# if policy isn't defined in the original index settings, then don't merge policy from the global_overrides #}
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM-managed indices #}
{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}

{# prevent this action from being performed on custom defined indices. #}
{# the custom defined index is not present in either of the dictionaries and fails to render. #}
{% if index in ES_INDEX_SETTINGS_ORIG and index in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES %}

{# don't merge policy from the global_overrides if policy isn't defined in the original index settings #}
{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM-managed indices #}
{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
{% endif %}

{# this prevents an index from inheriting a policy phase from global overrides if it wasn't defined in the defaults. #}
{% if ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
{% for phase in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.copy() %}
{% if ES_INDEX_SETTINGS_ORIG[index].policy.phases[phase] is not defined %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %}
{% endif %}
{% endfor %}
{% endif %}

{% endif %}

{% if settings.index_template is defined %}
@@ -6,7 +6,7 @@
      "name": "logs"
    },
    "codec": "best_compression",
    "default_pipeline": "logs-elastic_agent-1.13.1",
    "default_pipeline": "logs-elastic_agent-1.20.0",
    "mapping": {
      "total_fields": {
        "limit": "10000"
@@ -0,0 +1,201 @@
|
||||
{
|
||||
"template": {
|
||||
"settings": {
|
||||
"index": {
|
||||
"lifecycle": {
|
||||
"name": "metrics"
|
||||
},
|
||||
"default_pipeline": "metrics-fleet_server.agent_status-1.5.0",
|
||||
"mapping": {
|
||||
"total_fields": {
|
||||
"limit": "1000"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"mappings": {
|
||||
"dynamic": false,
|
||||
"_source": {
|
||||
"mode": "synthetic"
|
||||
},
|
||||
"properties": {
|
||||
"cluster": {
|
||||
"properties": {
|
||||
"id": {
|
||||
"time_series_dimension": true,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"fleet": {
|
||||
"properties": {
|
||||
"agents": {
|
||||
"properties": {
|
||||
"offline": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"total": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"updating": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"inactive": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"healthy": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"unhealthy": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"unenrolled": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"enrolled": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"unhealthy_reason": {
|
||||
"properties": {
|
||||
"output": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"input": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"other": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
}
|
||||
}
|
||||
},
|
||||
"upgrading_step": {
|
||||
"properties": {
|
||||
"rollback": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"requested": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"restarting": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"downloading": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"scheduled": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"extracting": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"replacing": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"failed": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"watching": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"agent": {
|
||||
"properties": {
|
||||
"id": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"type": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"version": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"@timestamp": {
|
||||
"ignore_malformed": false,
|
||||
"type": "date"
|
||||
},
|
||||
"data_stream": {
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"type": "constant_keyword"
|
||||
},
|
||||
"type": {
|
||||
"type": "constant_keyword"
|
||||
},
|
||||
"dataset": {
|
||||
"type": "constant_keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"kibana": {
|
||||
"properties": {
|
||||
"uuid": {
|
||||
"path": "agent.id",
|
||||
"type": "alias"
|
||||
},
|
||||
"version": {
|
||||
"path": "agent.version",
|
||||
"type": "alias"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"_meta": {
|
||||
"package": {
|
||||
"name": "fleet_server"
|
||||
},
|
||||
"managed_by": "fleet",
|
||||
"managed": true
|
||||
}
|
||||
}
|
||||
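The JSON above defines a new fleet_server agent-status metrics template (gauge fields, synthetic _source, time-series dimensions). A hedged way to confirm templates like this landed in Elasticsearch, assuming the usual so-elasticsearch-query wrapper and that the registered component template name contains "fleet_server" (the exact name depends on the file it is loaded from):

    # Show any fleet_server-related component templates currently registered
    so-elasticsearch-query '_component_template/*fleet_server*' | jq -r '.component_templates[].name'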
@@ -0,0 +1,102 @@
|
||||
{
|
||||
"template": {
|
||||
"settings": {
|
||||
"index": {
|
||||
"lifecycle": {
|
||||
"name": "metrics"
|
||||
},
|
||||
"default_pipeline": "metrics-fleet_server.agent_versions-1.5.0",
|
||||
"mapping": {
|
||||
"total_fields": {
|
||||
"limit": "1000"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"mappings": {
|
||||
"dynamic": false,
|
||||
"_source": {
|
||||
"mode": "synthetic"
|
||||
},
|
||||
"properties": {
|
||||
"cluster": {
|
||||
"properties": {
|
||||
"id": {
|
||||
"time_series_dimension": true,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"fleet": {
|
||||
"properties": {
|
||||
"agent": {
|
||||
"properties": {
|
||||
"count": {
|
||||
"time_series_metric": "gauge",
|
||||
"meta": {},
|
||||
"type": "long"
|
||||
},
|
||||
"version": {
|
||||
"time_series_dimension": true,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"agent": {
|
||||
"properties": {
|
||||
"id": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"type": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"version": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"@timestamp": {
|
||||
"ignore_malformed": false,
|
||||
"type": "date"
|
||||
},
|
||||
"data_stream": {
|
||||
"properties": {
|
||||
"namespace": {
|
||||
"type": "constant_keyword"
|
||||
},
|
||||
"type": {
|
||||
"type": "constant_keyword"
|
||||
},
|
||||
"dataset": {
|
||||
"type": "constant_keyword"
|
||||
}
|
||||
}
|
||||
},
|
||||
"kibana": {
|
||||
"properties": {
|
||||
"uuid": {
|
||||
"path": "agent.id",
|
||||
"type": "alias"
|
||||
},
|
||||
"version": {
|
||||
"path": "agent.version",
|
||||
"type": "alias"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"_meta": {
|
||||
"package": {
|
||||
"name": "fleet_server"
|
||||
},
|
||||
"managed_by": "fleet",
|
||||
"managed": true
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,29 @@
{
  "template": {
    "mappings": {
      "properties": {
        "host": {
          "properties": {
            "ip": {
              "type": "ip"
            }
          }
        },
        "related": {
          "properties": {
            "ip": {
              "type": "ip"
            }
          }
        },
        "source": {
          "properties": {
            "ip": {
              "type": "ip"
            }
          }
        }
      }
    }
  }
}
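This small component template forces host.ip, related.ip, and source.ip to the ip field type. One way to verify the mapping is actually in effect on live indices (a sketch; the logs-* index pattern used here is an assumption, adjust it to the indices you care about):

    # field_caps reports the effective type per field across matching indices
    so-elasticsearch-query 'logs-*/_field_caps?fields=host.ip,related.ip,source.ip' | jq '.fields'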
@@ -20,7 +20,7 @@ if [ ! -f /opt/so/state/espipelines.txt ]; then

cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..."
for i in .[a-z]* *;
for i in *;
do
echo $i;
retry 5 5 "so-elasticsearch-query _ingest/pipeline/$i -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load pipeline: $i"
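For context on the loop change above: the old glob pattern .[a-z]* * also matched dot-prefixed pipeline files, while the plain * glob skips them. A tiny, self-contained illustration of the difference (the temporary directory and file names are arbitrary):

    mkdir -p /tmp/pipe-demo && touch /tmp/pipe-demo/{.hidden,common,global}
    ( cd /tmp/pipe-demo && for i in *; do echo "new glob: $i"; done )           # skips .hidden
    ( cd /tmp/pipe-demo && for i in .[a-z]* *; do echo "old glob: $i"; done )   # includes .hidden
    rm -rf /tmp/pipe-demo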
@@ -4,7 +4,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{%- set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}

. /usr/sbin/so-common

@@ -40,9 +40,9 @@ fi

# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
{% if GLOBALS.role == 'so-manager' %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $8}'); do
{% else %}
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $8}'); do
{% endif %}
size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}')
unit=$(echo $i | grep -oE '[A-Za-z]+')

@@ -13,10 +13,10 @@ TOTAL_USED_SPACE=0
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space
{% if GLOBALS.role == 'so-manager' %}
# Get total disk space - disk.total
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $6}'); do
{% else %}
# Get disk space taken up by indices - disk.indices
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do
for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
{% endif %}
size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}')
unit=$(echo $i | grep -oE '[A-Za-z]+')

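The awk field changes above ($5 to $8, $3 to $6, $2 to $5) track a shift in the column order of _cat/allocation. A quick way to confirm which numbered field holds disk.indices, disk.used, disk.total, and disk.avail on the Elasticsearch version you are running (sketch, reusing the same wrapper as the scripts above):

    # Print the header row plus one data row; count columns to map them to awk fields
    so-elasticsearch-query '_cat/allocation?v' | head -2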
@@ -10,10 +10,26 @@
|
||||
|
||||
{%- for index, settings in ES_INDEX_SETTINGS.items() %}
|
||||
{%- if settings.policy is defined %}
|
||||
echo
|
||||
echo "Setting up {{ index }}-logs policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
|
||||
echo
|
||||
{%- if index == 'so-logs-detections.alerts' %}
|
||||
echo
|
||||
echo "Setting up so-logs-detections.alerts-so policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-so" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
|
||||
echo
|
||||
{%- elif index == 'so-logs-soc' %}
|
||||
echo
|
||||
echo "Setting up so-soc-logs policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/so-soc-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
|
||||
echo
|
||||
echo
|
||||
echo "Setting up {{ index }}-logs policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
|
||||
echo
|
||||
{%- else %}
|
||||
echo
|
||||
echo "Setting up {{ index }}-logs policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
|
||||
echo
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
echo
|
||||
|
||||
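After the ILM policy-load script above runs, it can be worth confirming that a given policy exists and only carries the phases defined in the defaults, which ties back to the phase-pruning Jinja earlier in this diff. A hedged example using the so-soc-logs policy named above:

    # Show the phases configured for the so-soc-logs ILM policy
    so-elasticsearch-query _ilm/policy/so-soc-logs | jq '."so-soc-logs".policy.phases | keys'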
@@ -5,7 +5,6 @@
|
||||
# Elastic License 2.0.
|
||||
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
|
||||
|
||||
STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt
|
||||
STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
|
||||
@@ -68,9 +67,9 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
|
||||
echo -n "Waiting for ElasticSearch..."
|
||||
retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
|
||||
{% if GLOBALS.role != 'so-heavynode' %}
|
||||
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
|
||||
INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} )
|
||||
if [ "$INSTALLED" != "installed" ]; then
|
||||
TEMPLATE="logs-endpoint.alerts@package"
|
||||
INSTALLED=$(so-elasticsearch-query _component_template/$TEMPLATE | jq -r .component_templates[0].name)
|
||||
if [ "$INSTALLED" != "$TEMPLATE" ]; then
|
||||
echo
|
||||
echo "Packages not yet installed."
|
||||
echo
|
||||
@@ -134,7 +133,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
|
||||
TEMPLATE=${i::-14}
|
||||
COMPONENT_PATTERN=${TEMPLATE:3}
|
||||
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
|
||||
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then
|
||||
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ logs-http_endpoint\.generic|logs-winlog\.winlog ]]; then
|
||||
load_failures=$((load_failures+1))
|
||||
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
|
||||
else
|
||||
@@ -153,7 +152,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
|
||||
cd - >/dev/null
|
||||
|
||||
if [[ $load_failures -eq 0 ]]; then
|
||||
echo "All template loaded successfully"
|
||||
echo "All templates loaded successfully"
|
||||
touch $STATE_FILE_SUCCESS
|
||||
else
|
||||
echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate"
|
||||
|
||||
@@ -120,7 +120,10 @@ firewall:
|
||||
influxdb:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
kafka:
|
||||
kafka_controller:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
kafka_data:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
kibana:
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
|
||||
{%- from 'soc/merged.map.jinja' import SOCMERGED -%}
|
||||
--suricata-version=6.0
|
||||
--suricata-version=7.0.3
|
||||
--merged=/opt/so/rules/nids/suri/all.rules
|
||||
--output=/nsm/rules/detect-suricata/custom_temp
|
||||
--local=/opt/so/rules/nids/suri/local.rules
|
||||
@@ -20,4 +20,4 @@
|
||||
--local={{ ruleset.file }}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
|
||||
@@ -11,8 +11,8 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
|
||||
{%- set proxy = salt['pillar.get']('manager:proxy') %}
|
||||
{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %}
|
||||
|
||||
# Download the rules from the internet
|
||||
{%- if proxy %}
|
||||
# Download the rules from the internet
|
||||
export http_proxy={{ proxy }}
|
||||
export https_proxy={{ proxy }}
|
||||
export no_proxy="{{ noproxy }}"
|
||||
@@ -20,12 +20,12 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
|
||||
|
||||
mkdir -p /nsm/rules/suricata
|
||||
chown -R socore:socore /nsm/rules/suricata
|
||||
{%- if not GLOBALS.airgap %}
|
||||
# Download the rules from the internet
|
||||
{%- if GLOBALS.airgap != 'True' %}
|
||||
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
|
||||
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
|
||||
docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
|
||||
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
|
||||
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
|
||||
docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
|
||||
|
||||
File diff suppressed because one or more lines are too long
37
salt/kafka/ca.sls
Normal file
@@ -0,0 +1,37 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% set KAFKATRUST = salt['pillar.get']('kafka:truststore') %}
|
||||
|
||||
kafkaconfdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/kafka
|
||||
- user: 960
|
||||
- group: 960
|
||||
- makedirs: True
|
||||
|
||||
{% if GLOBALS.is_manager %}
|
||||
# Manager runs so-kafka-trust to create truststore for Kafka ssl communication
|
||||
kafka_truststore:
|
||||
cmd.script:
|
||||
- source: salt://kafka/tools/sbin_jinja/so-kafka-trust
|
||||
- template: jinja
|
||||
- cwd: /opt/so
|
||||
- defaults:
|
||||
GLOBALS: {{ GLOBALS }}
|
||||
KAFKATRUST: {{ KAFKATRUST }}
|
||||
{% endif %}
|
||||
|
||||
kafkacertz:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/kafka/kafka-truststore.jks
|
||||
- source: salt://kafka/files/kafka-truststore
|
||||
- user: 960
|
||||
- group: 931
|
||||
|
||||
{% endif %}
|
||||
@@ -6,7 +6,8 @@
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
{% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %}
|
||||
{% set KAFKA_PASSWORD = salt['pillar.get']('kafka:password') %}
|
||||
{% set KAFKA_PASSWORD = salt['pillar.get']('kafka:config:password') %}
|
||||
{% set KAFKA_TRUSTPASS = salt['pillar.get']('kafka:config:trustpass') %}
|
||||
|
||||
{# Create list of KRaft controllers #}
|
||||
{% set controllers = [] %}
|
||||
@@ -67,19 +68,12 @@
|
||||
|
||||
{% endif %}
|
||||
|
||||
{# If a password other than PLACEHOLDER isn't set remove it from the server.properties #}
|
||||
{% if KAFKAMERGED.config.broker.ssl_x_truststore_x_password == 'PLACEHOLDER' %}
|
||||
{% do KAFKAMERGED.config.broker.pop('ssl_x_truststore_x_password') %}
|
||||
{% endif %}
|
||||
|
||||
{% if KAFKAMERGED.config.controller.ssl_x_truststore_x_password == 'PLACEHOLDER' %}
|
||||
{% do KAFKAMERGED.config.controller.pop('ssl_x_truststore_x_password') %}
|
||||
{% endif %}
|
||||
{# Truststore config #}
|
||||
{% do KAFKAMERGED.config.broker.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %}
|
||||
{% do KAFKAMERGED.config.controller.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %}
|
||||
{% do KAFKAMERGED.config.client.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %}
|
||||
|
||||
{# Client properties stuff #}
|
||||
{% if KAFKAMERGED.config.client.ssl_x_truststore_x_password == 'PLACEHOLDER' %}
|
||||
{% do KAFKAMERGED.config.client.pop('ssl_x_truststore_x_password') %}
|
||||
{% endif %}
|
||||
{% do KAFKAMERGED.config.client.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}
|
||||
|
||||
{% if 'broker' in node_type %}
|
||||
|
||||
@@ -7,18 +7,22 @@
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
include:
|
||||
- ssl
|
||||
|
||||
kafka_group:
|
||||
group.present:
|
||||
- name: kafka
|
||||
- gid: 960
|
||||
|
||||
kafka:
|
||||
kafka_user:
|
||||
user.present:
|
||||
- name: kafka
|
||||
- uid: 960
|
||||
- gid: 960
|
||||
- home: /opt/so/conf/kafka
|
||||
- createhome: False
|
||||
|
||||
kafka_home_dir:
|
||||
file.absent:
|
||||
- name: /home/kafka
|
||||
|
||||
kafka_sbin_tools:
|
||||
file.recurse:
|
||||
@@ -28,6 +32,17 @@ kafka_sbin_tools:
|
||||
- group: 960
|
||||
- file_mode: 755
|
||||
|
||||
kafka_sbin_jinja_tools:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://kafka/tools/sbin_jinja
|
||||
- user: 960
|
||||
- group: 960
|
||||
- file_mode: 755
|
||||
- template: jinja
|
||||
- defaults:
|
||||
GLOBALS: {{ GLOBALS }}
|
||||
|
||||
kafka_log_dir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/kafka
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
kafka:
|
||||
enabled: False
|
||||
cluster_id:
|
||||
password:
|
||||
controllers:
|
||||
reset:
|
||||
logstash: []
|
||||
config:
|
||||
password:
|
||||
trustpass:
|
||||
broker:
|
||||
advertised_x_listeners:
|
||||
auto_x_create_x_topics_x_enable: true
|
||||
@@ -30,16 +32,16 @@ kafka:
|
||||
ssl_x_keystore_x_location: /etc/pki/kafka.p12
|
||||
ssl_x_keystore_x_type: PKCS12
|
||||
ssl_x_keystore_x_password:
|
||||
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
|
||||
ssl_x_truststore_x_password: PLACEHOLDER
|
||||
ssl_x_truststore_x_type: PEM
|
||||
ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks
|
||||
ssl_x_truststore_x_type: JKS
|
||||
ssl_x_truststore_x_password:
|
||||
transaction_x_state_x_log_x_min_x_isr: 1
|
||||
transaction_x_state_x_log_x_replication_x_factor: 1
|
||||
client:
|
||||
security_x_protocol: SSL
|
||||
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
|
||||
ssl_x_truststore_x_password: PLACEHOLDER
|
||||
ssl_x_truststore_x_type: PEM
|
||||
ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks
|
||||
ssl_x_truststore_x_type: JKS
|
||||
ssl_x_truststore_x_password:
|
||||
ssl_x_keystore_x_location: /etc/pki/kafka.p12
|
||||
ssl_x_keystore_x_type: PKCS12
|
||||
ssl_x_keystore_x_password:
|
||||
@@ -57,6 +59,6 @@ kafka:
|
||||
ssl_x_keystore_x_location: /etc/pki/kafka.p12
|
||||
ssl_x_keystore_x_type: PKCS12
|
||||
ssl_x_keystore_x_password:
|
||||
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
|
||||
ssl_x_truststore_x_password: PLACEHOLDER
|
||||
ssl_x_truststore_x_type: PEM
|
||||
ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks
|
||||
ssl_x_truststore_x_type: JKS
|
||||
ssl_x_truststore_x_password:
|
||||
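The defaults above move the broker and client trust stores from the shared Java cacerts (PEM) to a dedicated /etc/pki/kafka-truststore.jks with a pillar-managed password. To confirm a Kafka node picked these up, one option (a sketch; the server.properties host path comes from the container bind mounts elsewhere in this diff, and the exact property names are assumed to follow the ssl.truststore.* convention) is:

    # Pillar value driving the truststore password
    sudo salt-call pillar.get kafka:config:trustpass
    # Rendered result on the host side of the bind mount
    sudo grep 'ssl.truststore' /opt/so/conf/kafka/server.properties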
@@ -22,4 +22,13 @@ ensure_default_pipeline:
|
||||
- name: |
|
||||
/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False;
|
||||
/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/global/soc_global.sls global.pipeline REDIS
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{# If Kafka has never been manually enabled, the 'kafka' user (uid 960) does not exist. In that case any Kafka certs owned by uid 960 will show an UNKNOWN owner, so they should not exist and are removed here. #}
|
||||
{% for cert in ['kafka-client.crt','kafka-client.key','kafka.crt','kafka.key','kafka-logstash.crt','kafka-logstash.key','kafka-logstash.p12','kafka.p12','elasticfleet-kafka.p8'] %}
|
||||
check_kafka_cert_{{cert}}:
|
||||
file.absent:
|
||||
- name: /etc/pki/{{cert}}
|
||||
- onlyif: stat -c %U /etc/pki/{{cert}} | grep -q UNKNOWN
|
||||
- show_changes: False
|
||||
{% endfor %}
|
||||
@@ -17,10 +17,11 @@
|
||||
{% if 'gmd' in salt['pillar.get']('features', []) %}
|
||||
|
||||
include:
|
||||
- elasticsearch.ca
|
||||
- kafka.sostatus
|
||||
- kafka.ca
|
||||
- kafka.config
|
||||
- kafka.ssl
|
||||
- kafka.storage
|
||||
- kafka.sostatus
|
||||
|
||||
so-kafka:
|
||||
docker_container.running:
|
||||
@@ -49,7 +50,7 @@ so-kafka:
|
||||
{% endfor %}
|
||||
- binds:
|
||||
- /etc/pki/kafka.p12:/etc/pki/kafka.p12:ro
|
||||
- /etc/pki/tls/certs/intca.crt:/etc/pki/java/sos/cacerts:ro
|
||||
- /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro
|
||||
- /nsm/kafka/data/:/nsm/kafka/data/:rw
|
||||
- /opt/so/log/kafka:/opt/kafka/logs/:rw
|
||||
- /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro
|
||||
@@ -58,6 +59,9 @@ so-kafka:
|
||||
{% for sc in ['server', 'client'] %}
|
||||
- file: kafka_kraft_{{sc}}_properties
|
||||
{% endfor %}
|
||||
- file: kafkacertz
|
||||
- require:
|
||||
- file: kafkacertz
|
||||
|
||||
delete_so-kafka_so-status.disabled:
|
||||
file.uncomment:
|
||||
|
||||
@@ -18,7 +18,12 @@ include:
|
||||
- kafka.nodes
|
||||
{% endif %}
|
||||
{% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %}
|
||||
{% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %}
|
||||
- kafka.enabled
|
||||
{# Searchnodes only run kafka.ssl state when Kafka is enabled #}
|
||||
{% elif grains.role == "so-searchnode" %}
|
||||
- kafka.ssl
|
||||
{% endif %}
|
||||
{% else %}
|
||||
- kafka.disabled
|
||||
{% endif %}
|
||||
|
||||
@@ -8,19 +8,31 @@ kafka:
|
||||
advanced: True
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
password:
|
||||
description: The password to use for the Kafka certificates.
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
controllers:
|
||||
description: A comma-separated list of hostnames that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. Note that only manager and receiver nodes are eligible to run Kafka. This configuration needs to be set before enabling Kafka. Failure to do so may result in Kafka topics becoming unavailable requiring manual intervention to restore functionality or reset Kafka, either of which can result in data loss.
|
||||
forcedType: "string"
|
||||
forcedType: string
|
||||
helpLink: kafka.html
|
||||
reset:
|
||||
description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed.
|
||||
advanced: True
|
||||
helpLink: kafka.html
|
||||
logstash:
|
||||
description: By default logstash is disabled when Kafka is enabled. This option allows you to specify any hosts you would like to re-enable logstash on alongside Kafka.
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
advanced: True
|
||||
helpLink: kafka.html
|
||||
config:
|
||||
password:
|
||||
description: The password used for the Kafka certificates.
|
||||
readonly: True
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
trustpass:
|
||||
description: The password used for the Kafka truststore.
|
||||
readonly: True
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
broker:
|
||||
advertised_x_listeners:
|
||||
description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
|
||||
@@ -128,6 +140,10 @@ kafka:
|
||||
description: The trust store file location within the Docker container.
|
||||
title: ssl.truststore.location
|
||||
helpLink: kafka.html
|
||||
ssl_x_truststore_x_type:
|
||||
description: The trust store file format.
|
||||
title: ssl.truststore.type
|
||||
helpLink: kafka.html
|
||||
ssl_x_truststore_x_password:
|
||||
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
|
||||
title: ssl.truststore.password
|
||||
@@ -167,6 +183,10 @@ kafka:
|
||||
description: The trust store file location within the Docker container.
|
||||
title: ssl.truststore.location
|
||||
helpLink: kafka.html
|
||||
ssl_x_truststore_x_type:
|
||||
description: The trust store file format.
|
||||
title: ssl.truststore.type
|
||||
helpLink: kafka.html
|
||||
ssl_x_truststore_x_password:
|
||||
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
|
||||
title: ssl.truststore.password
|
||||
|
||||
201
salt/kafka/ssl.sls
Normal file
@@ -0,0 +1,201 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% set kafka_password = salt['pillar.get']('kafka:config:password') %}
|
||||
|
||||
include:
|
||||
- ca.dirs
|
||||
{% set global_ca_server = [] %}
|
||||
{% set x509dict = salt['mine.get'](GLOBALS.manager | lower~'*', 'x509.get_pem_entries') %}
|
||||
{% for host in x509dict %}
|
||||
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
|
||||
{% do global_ca_server.append(host) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% set ca_server = global_ca_server[0] %}
|
||||
|
||||
{% if GLOBALS.pipeline == "KAFKA" %}
|
||||
|
||||
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
|
||||
kafka_client_key:
|
||||
x509.private_key_managed:
|
||||
- name: /etc/pki/kafka-client.key
|
||||
- keysize: 4096
|
||||
- backup: True
|
||||
- new: True
|
||||
{% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%}
|
||||
- prereq:
|
||||
- x509: /etc/pki/kafka-client.crt
|
||||
{%- endif %}
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
|
||||
kafka_client_crt:
|
||||
x509.certificate_managed:
|
||||
- name: /etc/pki/kafka-client.crt
|
||||
- ca_server: {{ ca_server }}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||
- signing_policy: kafka
|
||||
- private_key: /etc/pki/kafka-client.key
|
||||
- CN: {{ GLOBALS.hostname }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- timeout: 30
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
|
||||
kafka_client_key_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-client.key
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_client_crt_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-client.crt
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
|
||||
kafka_key:
|
||||
x509.private_key_managed:
|
||||
- name: /etc/pki/kafka.key
|
||||
- keysize: 4096
|
||||
- backup: True
|
||||
- new: True
|
||||
{% if salt['file.file_exists']('/etc/pki/kafka.key') -%}
|
||||
- prereq:
|
||||
- x509: /etc/pki/kafka.crt
|
||||
{%- endif %}
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
|
||||
kafka_crt:
|
||||
x509.certificate_managed:
|
||||
- name: /etc/pki/kafka.crt
|
||||
- ca_server: {{ ca_server }}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||
- signing_policy: kafka
|
||||
- private_key: /etc/pki/kafka.key
|
||||
- CN: {{ GLOBALS.hostname }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- timeout: 30
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
cmd.run:
|
||||
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}"
|
||||
- onchanges:
|
||||
- x509: /etc/pki/kafka.key
|
||||
kafka_key_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka.key
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_crt_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka.crt
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_pkcs12_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka.p12
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
{% endif %}
|
||||
|
||||
# Standalone needs kafka-logstash for automated testing. Searchnode/managersearch nodes need it for logstash to consume from Kafka.
|
||||
# Manager will have cert, but be unused until a pipeline is created and logstash enabled.
|
||||
{% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
|
||||
kafka_logstash_key:
|
||||
x509.private_key_managed:
|
||||
- name: /etc/pki/kafka-logstash.key
|
||||
- keysize: 4096
|
||||
- backup: True
|
||||
- new: True
|
||||
{% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%}
|
||||
- prereq:
|
||||
- x509: /etc/pki/kafka-logstash.crt
|
||||
{%- endif %}
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
|
||||
kafka_logstash_crt:
|
||||
x509.certificate_managed:
|
||||
- name: /etc/pki/kafka-logstash.crt
|
||||
- ca_server: {{ ca_server }}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||
- signing_policy: kafka
|
||||
- private_key: /etc/pki/kafka-logstash.key
|
||||
- CN: {{ GLOBALS.hostname }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- timeout: 30
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
cmd.run:
|
||||
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:{{ kafka_password }}"
|
||||
- onchanges:
|
||||
- x509: /etc/pki/kafka-logstash.key
|
||||
|
||||
kafka_logstash_key_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-logstash.key
|
||||
- mode: 640
|
||||
- user: 931
|
||||
- group: 939
|
||||
|
||||
kafka_logstash_crt_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-logstash.crt
|
||||
- mode: 640
|
||||
- user: 931
|
||||
- group: 939
|
||||
|
||||
kafka_logstash_pkcs12_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-logstash.p12
|
||||
- mode: 640
|
||||
- user: 931
|
||||
- group: 939
|
||||
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
13
salt/kafka/tools/sbin_jinja/so-kafka-trust
Normal file
@@ -0,0 +1,13 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
{% set TRUSTPASS = salt['pillar.get']('kafka:config:trustpass') %}
|
||||
|
||||
if [ ! -f /opt/so/saltstack/local/salt/kafka/files/kafka-truststore ]; then
|
||||
docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-kafkatrust --user root --entrypoint /opt/java/openjdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -import -file /etc/pki/ca.crt -alias SOS -keystore /etc/pki/kafka-truststore -storepass {{ TRUSTPASS }} -storetype jks -noprompt
|
||||
docker cp so-kafkatrust:/etc/pki/kafka-truststore /opt/so/saltstack/local/salt/kafka/files/kafka-truststore
|
||||
docker rm so-kafkatrust
|
||||
fi
|
||||
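so-kafka-trust (above) builds the JKS trust store inside the so-kafka image and copies it under salt://kafka/files. To double-check the CA actually made it into the store, a similar keytool invocation can list its entries. This is a sketch: the angle-bracket values mirror the registry/image variables used in the script, and the store password comes from the kafka:config:trustpass pillar.

    docker run --rm -v /opt/so/saltstack/local/salt/kafka/files/kafka-truststore:/tmp/kafka-truststore:ro \
      --entrypoint /opt/java/openjdk/bin/keytool \
      <registry_host>:5000/<image_repo>/so-kafka:<so_version> \
      -list -keystore /tmp/kafka-truststore -storepass <trustpass> -storetype jks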
@@ -1 +1,2 @@
|
||||
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
|
||||
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.14.3","id": "8.14.3","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ update() {
|
||||
|
||||
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
|
||||
for i in "${LINES[@]}"; do
|
||||
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
|
||||
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.14.3" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
|
||||
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
|
||||
done
|
||||
|
||||
|
||||
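The two hunks above bump the Kibana config saved object (and the script that updates it) from id 8.10.4 to 8.14.3. A hedged way to check which config object the running Kibana is serving, reusing the same curl config file as the script:

    # Fetch the config saved object for the new Kibana version
    curl -s -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/saved_objects/config/8.14.3" \
      -H 'kbn-xsrf: true' | jq '{id, updated_at, darkMode: .attributes."theme:darkMode"}'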
@@ -7,9 +7,7 @@ logstash:
|
||||
- search
|
||||
receiver:
|
||||
- receiver
|
||||
heavynode:
|
||||
- manager
|
||||
- search
|
||||
heavynode: []
|
||||
searchnode:
|
||||
- search
|
||||
manager:
|
||||
@@ -27,7 +25,7 @@ logstash:
|
||||
- so/0011_input_endgame.conf
|
||||
- so/0012_input_elastic_agent.conf.jinja
|
||||
- so/0013_input_lumberjack_fleet.conf
|
||||
- so/9999_output_redis.conf.jinja
|
||||
- so/9999_output_redis.conf.jinja
|
||||
receiver:
|
||||
- so/0011_input_endgame.conf
|
||||
- so/0012_input_elastic_agent.conf.jinja
|
||||
@@ -37,7 +35,6 @@ logstash:
|
||||
- so/0900_input_redis.conf.jinja
|
||||
- so/9805_output_elastic_agent.conf.jinja
|
||||
- so/9900_output_endgame.conf.jinja
|
||||
- so/0800_input_kafka.conf.jinja
|
||||
custom0: []
|
||||
custom1: []
|
||||
custom2: []
|
||||
|
||||
@@ -14,6 +14,11 @@
|
||||
include:
|
||||
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
|
||||
- elasticsearch.ca
|
||||
{% endif %}
|
||||
{# Kafka ca runs on nodes that can run logstash for Kafka input / output, and only when Kafka is the global pipeline #}
|
||||
{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %}
|
||||
- kafka.ca
|
||||
- kafka.ssl
|
||||
{% endif %}
|
||||
- logstash.config
|
||||
- logstash.sostatus
|
||||
@@ -79,8 +84,9 @@ so-logstash:
|
||||
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
|
||||
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
|
||||
{% endif %}
|
||||
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
||||
{% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
||||
- /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
|
||||
- /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro
|
||||
{% endif %}
|
||||
{% if GLOBALS.role == 'so-eval' %}
|
||||
- /nsm/zeek:/nsm/zeek:ro
|
||||
@@ -105,6 +111,9 @@ so-logstash:
|
||||
- file: ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
||||
- file: kafkacertz
|
||||
{% endif %}
|
||||
- require:
|
||||
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
|
||||
- x509: etc_filebeat_crt
|
||||
@@ -118,6 +127,9 @@ so-logstash:
|
||||
- file: cacertz
|
||||
- file: capemz
|
||||
{% endif %}
|
||||
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
|
||||
- file: kafkacertz
|
||||
{% endif %}
|
||||
|
||||
delete_so-logstash_so-status.disabled:
|
||||
file.uncomment:
|
||||
|
||||
@@ -4,13 +4,9 @@
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
|
||||
{% from 'kafka/map.jinja' import KAFKAMERGED %}
|
||||
|
||||
include:
|
||||
{# Disable logstash when Kafka is enabled except when the role is standalone #}
|
||||
{% if LOGSTASH_MERGED.enabled and grains.role == 'so-standalone' %}
|
||||
- logstash.enabled
|
||||
{% elif LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %}
|
||||
{% if LOGSTASH_MERGED.enabled %}
|
||||
- logstash.enabled
|
||||
{% else %}
|
||||
- logstash.disabled
|
||||
|
||||
@@ -6,24 +6,40 @@
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% import_yaml 'logstash/defaults.yaml' as LOGSTASH_DEFAULTS %}
|
||||
{% set LOGSTASH_MERGED = salt['pillar.get']('logstash', LOGSTASH_DEFAULTS.logstash, merge=True) %}
|
||||
{% set KAFKA_LOGSTASH = salt['pillar.get']('kafka:logstash', []) %}
|
||||
|
||||
{% set REDIS_NODES = [] %}
|
||||
{# LOGSTASH_NODES is the same as ES_LOGSTASH_NODES from elasticsearch/config.map.jinja but heavynodes are present #}
|
||||
{# used to store the redis nodes that logstash needs to know about to pull from the queue #}
|
||||
{% set LOGSTASH_REDIS_NODES = [] %}
|
||||
{# stores all logstash nodes #}
|
||||
{% set LOGSTASH_NODES = [] %}
|
||||
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
|
||||
{% set logstash_node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
|
||||
{% set redis_node_data = salt['pillar.get']('redis:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
|
||||
|
||||
{% for node_type, node_details in node_data.items() | sort %}
|
||||
{% for node_type, node_details in redis_node_data.items() | sort %}
|
||||
{% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch', 'so-fleet'] %}
|
||||
{% if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %}
|
||||
{% for hostname in node_data[node_type].keys() %}
|
||||
{% do REDIS_NODES.append({hostname:node_details[hostname].ip}) %}
|
||||
{% for hostname in redis_node_data[node_type].keys() %}
|
||||
{% do LOGSTASH_REDIS_NODES.append({hostname:node_details[hostname].ip}) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{% do REDIS_NODES.append({GLOBALS.hostname:GLOBALS.node_ip}) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{% for hostname in node_data[node_type].keys() %}
|
||||
{% for node_type, node_details in logstash_node_data.items() | sort %}
|
||||
{% for hostname in logstash_node_data[node_type].keys() %}
|
||||
{% do LOGSTASH_NODES.append({hostname:node_details[hostname].ip}) %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
{# Append Kafka input pipeline when Kafka is enabled #}
|
||||
{% if GLOBALS.pipeline == 'KAFKA' %}
|
||||
{% do LOGSTASH_MERGED.defined_pipelines.search.remove('so/0900_input_redis.conf.jinja') %}
|
||||
{% do LOGSTASH_MERGED.defined_pipelines.search.append('so/0800_input_kafka.conf.jinja') %}
|
||||
{% do LOGSTASH_MERGED.defined_pipelines.manager.append('so/0800_input_kafka.conf.jinja') %}
|
||||
{# Disable logstash on manager & receiver nodes unless it has an override configured #}
|
||||
{% if not KAFKA_LOGSTASH %}
|
||||
{% if GLOBALS.role in ['so-manager', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %}
|
||||
{% do LOGSTASH_MERGED.update({'enabled': False}) %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
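The map.jinja rework above splits node discovery into LOGSTASH_REDIS_NODES (built from the redis:nodes pillar) and LOGSTASH_NODES (built from the logstash:nodes pillar). When debugging which hosts end up in each list, the underlying pillars can be inspected directly on a minion (a sketch; output depends on the pillar data assigned to that minion):

    # Which redis-capable nodes does this minion know about?
    sudo salt-call pillar.get redis:nodes
    # Which logstash nodes?
    sudo salt-call pillar.get logstash:nodes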
@@ -1,4 +1,5 @@
|
||||
{%- set kafka_password = salt['pillar.get']('kafka:password') %}
|
||||
{%- set kafka_password = salt['pillar.get']('kafka:config:password') %}
|
||||
{%- set kafka_trustpass = salt['pillar.get']('kafka:config:trustpass') %}
|
||||
{%- set kafka_brokers = salt['pillar.get']('kafka:nodes', {}) %}
|
||||
{%- set brokers = [] %}
|
||||
|
||||
@@ -22,8 +23,8 @@ input {
|
||||
ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12'
|
||||
ssl_keystore_password => '{{ kafka_password }}'
|
||||
ssl_keystore_type => 'PKCS12'
|
||||
ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts'
|
||||
ssl_truststore_password => 'changeit'
|
||||
ssl_truststore_location => '/etc/pki/kafka-truststore.jks'
|
||||
ssl_truststore_password => '{{ kafka_trustpass }}'
|
||||
decorate_events => true
|
||||
tags => [ "elastic-agent", "input-{{ GLOBALS.hostname}}", "kafka" ]
|
||||
}
|
||||
|
||||
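The Kafka input above now points Logstash at /etc/pki/kafka-truststore.jks with the pillar-managed password instead of the shared Java cacerts. Since both the keystore and the trust store reach the container via bind mounts (shown earlier in this diff), a quick in-container check can catch a missing mount (sketch):

    # Both files should be present and readable inside the so-logstash container
    sudo docker exec so-logstash ls -l /usr/share/logstash/kafka-logstash.p12 /etc/pki/kafka-truststore.jks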
@@ -1,8 +1,8 @@
|
||||
{%- from 'logstash/map.jinja' import REDIS_NODES with context %}
|
||||
{%- from 'logstash/map.jinja' import LOGSTASH_REDIS_NODES with context %}
|
||||
{%- set REDIS_PASS = salt['pillar.get']('redis:config:requirepass') %}
|
||||
|
||||
{%- for index in range(REDIS_NODES|length) %}
|
||||
{%- for host in REDIS_NODES[index] %}
|
||||
{%- for index in range(LOGSTASH_REDIS_NODES|length) %}
|
||||
{%- for host in LOGSTASH_REDIS_NODES[index] %}
|
||||
input {
|
||||
redis {
|
||||
host => '{{ host }}'
|
||||
|
||||
@@ -9,6 +9,10 @@ if [ -f /usr/sbin/so-common ]; then
|
||||
. /usr/sbin/so-common
|
||||
fi
|
||||
|
||||
if [ -f /usr/sbin/so-elastic-fleet-common ]; then
|
||||
. /usr/sbin/so-elastic-fleet-common
|
||||
fi
|
||||
|
||||
function usage() {
|
||||
echo "Usage: $0 -o=<operation> -m=[id]"
|
||||
echo ""
|
||||
@@ -380,23 +384,31 @@ function add_elastic_fleet_package_registry_to_minion() {
|
||||
|
||||
function create_fleet_policy() {
|
||||
|
||||
JSON_STRING=$( jq -n \
|
||||
--arg NAME "FleetServer_$LSHOSTNAME" \
|
||||
--arg DESC "Fleet Server - $LSHOSTNAME" \
|
||||
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":1209600,"has_fleet_server":true}'
|
||||
)
|
||||
# First, set the default output to Elasticsearch
|
||||
# This is required because of the license output bug
|
||||
JSON_STRING=$(jq -n \
|
||||
'{
|
||||
"name": "so-manager_elasticsearch",
|
||||
"type": "elasticsearch",
|
||||
"is_default": true,
|
||||
"is_default_monitoring": false
|
||||
}')
|
||||
|
||||
# Create Fleet Sever Policy
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
||||
|
||||
JSON_STRING_UPDATE=$( jq -n \
|
||||
--arg NAME "FleetServer_$LSHOSTNAME" \
|
||||
--arg DESC "Fleet Server - $LSHOSTNAME" \
|
||||
'{"name":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":120,"data_output_id":"so-manager_elasticsearch"}'
|
||||
)
|
||||
# Create the Fleet Server Policy
|
||||
elastic_fleet_policy_create "FleetServer_$LSHOSTNAME" "Fleet Server - $LSHOSTNAME" "false" "120"
|
||||
|
||||
# Update Fleet Policy - ES Output
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_$LSHOSTNAME" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING_UPDATE"
|
||||
# Modify the default integration policy to update the policy_id with the correct naming
|
||||
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_$LSHOSTNAME" --arg name "fleet_server-$LSHOSTNAME" '
|
||||
.policy_id = $policy_id |
|
||||
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
|
||||
|
||||
# Add the Fleet Server Integration to the new Fleet Policy
|
||||
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
|
||||
|
||||
# Set the default output back to the default
|
||||
/sbin/so-elastic-fleet-outputs-update
|
||||
}
|
||||
|
||||
function update_fleet_host_urls() {
|
||||
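create_fleet_policy (above) now pins the policy's data output to so-manager_elasticsearch before creating the Fleet Server policy, then switches the default output back via so-elastic-fleet-outputs-update. To see the resulting outputs and which one is currently the default, a sketch using the same Kibana Fleet API and curl config as the script:

    # List Fleet outputs and flag the current defaults
    curl -s -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/outputs" \
      -H 'kbn-xsrf: true' | jq '.items[] | {id, type, is_default, is_default_monitoring}'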
@@ -528,7 +540,6 @@ function createHEAVYNODE() {
|
||||
pcapspace
|
||||
add_elasticsearch_to_minion
|
||||
add_elastic_agent_to_minion
|
||||
add_logstash_to_minion
|
||||
add_sensor_to_minion
|
||||
add_strelka_to_minion
|
||||
add_redis_to_minion
|
||||
|
||||
@@ -234,10 +234,14 @@ function updatePassword() {
|
||||
passwordHash=$(hashPassword "$password")
|
||||
# Update DB with new hash
|
||||
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB), created_at=datetime('now'), updated_at=datetime('now') where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='password');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
|
||||
# Deactivate MFA
|
||||
echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
|
||||
echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to update password"
|
||||
# Deactivate MFA
|
||||
echo "delete from identity_credential_identifiers where identity_credential_id in (select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id in (select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to clear aal2 identity IDs"
|
||||
echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id in (select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to clear aal2 identity credentials"
|
||||
echo "update identities set available_aal='aal1' where id='${identityId}';" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to reset aal"
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
@@ -1,2 +0,0 @@
|
||||
#!/bin/bash
|
||||
so-user add --email $1
|
||||
@@ -1,2 +0,0 @@
|
||||
#!/bin/bash
|
||||
so-user disable --email $1
|
||||
@@ -1,2 +0,0 @@
|
||||
#!/bin/bash
|
||||
so-user enable --email $1
|
||||
@@ -1,2 +0,0 @@
|
||||
#!/bin/bash
|
||||
so-user list
|
||||
@@ -1,7 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
@@ -30,7 +30,7 @@ check_err() {
|
||||
[[ $ERR_HANDLED == true ]] && exit $exit_code
|
||||
|
||||
if [[ $exit_code -ne 0 ]]; then
|
||||
|
||||
|
||||
set +e
|
||||
systemctl_func "start" "$cron_service_name"
|
||||
systemctl_func "start" "salt-master"
|
||||
@@ -108,7 +108,7 @@ add_common() {
|
||||
}
|
||||
|
||||
airgap_mounted() {
|
||||
# Let's see if the ISO is already mounted.
|
||||
# Let's see if the ISO is already mounted.
|
||||
if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
|
||||
echo "The ISO is already mounted"
|
||||
else
|
||||
@@ -116,8 +116,8 @@ airgap_mounted() {
|
||||
echo "This is airgap. Ask for a location."
|
||||
echo ""
|
||||
cat << EOF
|
||||
In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.
|
||||
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
|
||||
In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.
|
||||
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
|
||||
Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.
|
||||
|
||||
EOF
|
||||
@@ -134,7 +134,7 @@ EOF
|
||||
exit 0
|
||||
else
|
||||
echo "ISO has been mounted!"
|
||||
fi
|
||||
fi
|
||||
elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
|
||||
ln -s $ISOLOC /tmp/soagupdate
|
||||
echo "Found the update content"
|
||||
@@ -149,7 +149,7 @@ EOF
|
||||
echo "Device has been mounted!"
|
||||
fi
|
||||
else
|
||||
echo "Could not find Security Onion ISO content at ${ISOLOC}"
|
||||
echo "Could not find Security Onion ISO content at ${ISOLOC}"
|
||||
echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
|
||||
exit 0
|
||||
fi
|
||||
@@ -195,7 +195,7 @@ check_airgap() {
|
||||
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
|
||||
AGDOCKER=/tmp/soagupdate/docker
|
||||
AGREPO=/tmp/soagupdate/minimal/Packages
|
||||
else
|
||||
else
|
||||
is_airgap=1
|
||||
fi
|
||||
}
|
||||
@@ -308,6 +308,21 @@ clone_to_tmp() {
|
||||
fi
|
||||
}
|
||||
|
||||
disable_logstash_heavynodes() {
|
||||
c=0
|
||||
printf "\nChecking for heavynodes and disabling Logstash if they exist\n"
|
||||
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
|
||||
if [[ "$file" =~ "_heavynode.sls" && ! "$file" =~ "/opt/so/saltstack/local/pillar/minions/adv_" ]]; then
|
||||
if [ "$c" -eq 0 ]; then
|
||||
c=$((c + 1))
|
||||
FINAL_MESSAGE_QUEUE+=("Logstash has been disabled on all heavynodes. It can be re-enabled via Grid Configuration in SOC.")
|
||||
fi
|
||||
echo "Disabling Logstash for: $file"
|
||||
so-yaml.py replace "$file" logstash.enabled False
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
enable_highstate() {
|
||||
echo "Enabling highstate."
|
||||
salt-call state.enable highstate -l info --local
|
||||
@@ -385,17 +400,19 @@ preupgrade_changes() {
|
||||
[[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
|
||||
[[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
|
||||
[[ "$INSTALLEDVERSION" == 2.4.70 ]] && up_to_2.4.80
|
||||
[[ "$INSTALLEDVERSION" == 2.4.80 ]] && up_to_2.4.90
|
||||
[[ "$INSTALLEDVERSION" == 2.4.90 ]] && up_to_2.4.100
|
||||
true
|
||||
}
|
||||
|
||||
postupgrade_changes() {
|
||||
# This function is to add any new pillar items if needed.
|
||||
echo "Running post upgrade processes."
|
||||
|
||||
|
||||
[[ "$POSTVERSION" == 2.4.2 ]] && post_to_2.4.3
|
||||
[[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
|
||||
[[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
|
||||
[[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
|
||||
[[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
|
||||
[[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
|
||||
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
|
||||
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
|
||||
[[ "$POSTVERSION" == 2.4.30 ]] && post_to_2.4.40
|
||||
@@ -403,6 +420,8 @@ postupgrade_changes() {
|
||||
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
|
||||
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
|
||||
[[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
|
||||
[[ "$POSTVERSION" == 2.4.80 ]] && post_to_2.4.90
|
||||
[[ "$POSTVERSION" == 2.4.90 ]] && post_to_2.4.100
|
||||
true
|
||||
}
|
||||
|
||||
@@ -434,8 +453,6 @@ post_to_2.4.20() {
|
||||
}
|
||||
|
||||
post_to_2.4.30() {
|
||||
echo "Regenerating Elastic Agent Installers"
|
||||
/sbin/so-elastic-agent-gen-installers
|
||||
# there is an occasional error with this state: pki_public_ca_crt: TypeError: list indices must be integers or slices, not str
|
||||
set +e
|
||||
salt-call state.apply ca queue=True
|
||||
@@ -460,8 +477,7 @@ post_to_2.4.50() {
|
||||
}
|
||||
|
||||
post_to_2.4.60() {
|
||||
echo "Regenerating Elastic Agent Installers..."
|
||||
so-elastic-agent-gen-installers
|
||||
echo "Nothing to apply"
|
||||
POSTVERSION=2.4.60
|
||||
}
|
||||
|
||||
@@ -482,6 +498,17 @@ post_to_2.4.80() {
|
||||
POSTVERSION=2.4.80
|
||||
}
|
||||
|
||||
post_to_2.4.90() {
|
||||
disable_logstash_heavynodes
|
||||
POSTVERSION=2.4.90
|
||||
}
|
||||
|
||||
post_to_2.4.100() {
|
||||
echo "Regenerating Elastic Agent Installers"
|
||||
/sbin/so-elastic-agent-gen-installers
|
||||
POSTVERSION=2.4.100
|
||||
}
|
||||
|
||||
repo_sync() {
|
||||
echo "Sync the local repo."
|
||||
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
|
||||
@@ -547,29 +574,18 @@ up_to_2.4.5() {
|
||||
|
||||
up_to_2.4.10() {
|
||||
echo "Nothing to do for 2.4.10"
|
||||
|
||||
|
||||
INSTALLEDVERSION=2.4.10
|
||||
}
|
||||
|
||||
up_to_2.4.20() {
|
||||
echo "Nothing to do for 2.4.20"
|
||||
|
||||
|
||||
INSTALLEDVERSION=2.4.20
|
||||
}
|
||||
|
||||
up_to_2.4.30() {
|
||||
|
||||
# Remove older defend integration json & installed integration
|
||||
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
|
||||
|
||||
. $UPDATE_DIR/salt/elasticfleet/tools/sbin/so-elastic-fleet-common
|
||||
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
|
||||
|
||||
rm -f /opt/so/state/eaintegrations.txt
|
||||
|
||||
# Elastic Update for this release, so download Elastic Agent files
|
||||
determine_elastic_agent_upgrade
|
||||
rm -f /opt/so/state/estemplates*.txt
|
||||
echo "Nothing to do for 2.4.30"
|
||||
|
||||
INSTALLEDVERSION=2.4.30
|
||||
}
|
||||
@@ -606,7 +622,7 @@ up_to_2.4.50() {
mkdir /opt/so/rules/nids/suri
chown socore:socore /opt/so/rules/nids/suri
mv -v /opt/so/rules/nids/*.rules /opt/so/rules/nids/suri/.

echo "Adding /nsm/elastic-fleet/artifacts to file_roots in /etc/salt/master using so-yaml"
so-yaml.py append /etc/salt/master file_roots.base /nsm/elastic-fleet/artifacts

@@ -650,6 +666,26 @@ up_to_2.4.80() {
INSTALLEDVERSION=2.4.80
}
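If you want to confirm that append took effect, the same so-yaml.py helper used above can read the key back. An illustrative check, not part of soup itself:

# Illustrative check only: read back the Salt master file_roots after the append above.
so-yaml.py get /etc/salt/master file_roots.base   # should now include /nsm/elastic-fleet/artifacts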
up_to_2.4.90() {
kafkatrust=$(get_random_value)
# rearranging the kafka pillar to reduce clutter in SOC UI
kafkasavedpass=$(so-yaml.py get /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password)
kafkatrimpass=$(echo "$kafkasavedpass" | sed -n '1 p' )
echo "Making changes to the Kafka pillar layout"
so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.password
so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.password "$kafkatrimpass"
so-yaml.py add /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.trustpass "$kafkatrust"
echo "If the Detection index exists, update the refresh_interval"
so-elasticsearch-query so-detection*/_settings -X PUT -d '{"index":{"refresh_interval":"1s"}}'

INSTALLEDVERSION=2.4.90
}

up_to_2.4.100() {
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
INSTALLEDVERSION=2.4.100
}
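In other words, the 2.4.90 step moves the saved Kafka password under kafka.config and adds a generated trust-store password alongside it. A quick way to see the rearranged pillar afterwards, sketched here by reusing the so-yaml.py calls shown above:

# Illustrative only: inspect the rearranged Kafka pillar after up_to_2.4.90 has run.
so-yaml.py get /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.password
so-yaml.py get /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.config.trustpass
# kafka.password should no longer exist at the top level of the kafka pillar.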
add_detection_test_pillars() {
if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then
echo "Adding detection pillar values for automated testing"
@@ -679,7 +715,7 @@ Documentation: https://docs.securityonion.net/en/2.4/telemetry.html
ASSIST_EOF

echo -n "Continue the upgrade with SOC Telemetry enabled [Y/n]? "

read -r input
input=$(echo "${input,,}" | xargs echo -n)
echo ""
@@ -720,7 +756,7 @@ suricata_idstools_migration() {
rsync -av /opt/so/rules/nids/suri/local.rules /nsm/backup/detections-migration/suricata/local-rules
if [[ -f /opt/so/saltstack/local/salt/idstools/rules/local.rules ]]; then
rsync -av /opt/so/saltstack/local/salt/idstools/rules/local.rules /nsm/backup/detections-migration/suricata/local-rules/local.rules.bak
fi
fi

#Tell SOC to migrate
mkdir -p /opt/so/conf/soc/migrations
@@ -737,7 +773,7 @@ playbook_migration() {
crontab -l | grep -v 'so-playbook-ruleupdate_cron' | crontab -

if grep -A 1 'playbook:' /opt/so/saltstack/local/pillar/minions/* | grep -q 'enabled: True'; then

# Check for active Elastalert rules
active_rules_count=$(find /opt/so/rules/elastalert/playbook/ -type f \( -name "*.yaml" -o -name "*.yml" \) | wc -l)

@@ -829,7 +865,7 @@ upgrade_space() {
fi
else
echo "You have enough space for upgrade. Proceeding with soup."
fi
fi
}

unmount_update() {
@@ -887,7 +923,7 @@ upgrade_check() {
fi
else
is_hotfix=false
fi
fi

}

@@ -899,7 +935,7 @@ upgrade_check_salt() {
echo "Salt needs to be upgraded to $NEWSALTVERSION."
UPGRADESALT=1
fi
}

upgrade_salt() {
SALTUPGRADED=True
@@ -909,7 +945,9 @@ upgrade_salt() {
if [[ $is_rpm ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
yum versionlock delete "salt"
yum versionlock delete "salt-minion"
yum versionlock delete "salt-master"
echo "Updating Salt packages."
echo ""
set +e
@@ -927,7 +965,9 @@ upgrade_salt() {
set -e
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
yum versionlock add "salt-0:$NEWSALTVERSION-0.*"
yum versionlock add "salt-minion-0:$NEWSALTVERSION-0.*"
yum versionlock add "salt-master-0:$NEWSALTVERSION-0.*"
# Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
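For reference, with the Salt version pinned later in this changeset (3006.9), the versionlock entries added above would expand roughly as follows. This is an illustrative expansion, not output captured from soup:

# Hypothetical expansion of the versionlock pattern above with NEWSALTVERSION=3006.9
yum versionlock add "salt-0:3006.9-0.*"
yum versionlock add "salt-minion-0:3006.9-0.*"
yum versionlock add "salt-master-0:3006.9-0.*"
yum versionlock list | grep salt   # verify the locks are in place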
@@ -1017,7 +1057,7 @@ apply_hotfix() {
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
fi
fi
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
fi
@@ -1046,7 +1086,7 @@ apply_hotfix() {

main() {
trap 'check_err $?' EXIT

if [ -n "$BRANCH" ]; then
echo "SOUP will use the $BRANCH branch."
echo ""
@@ -1230,7 +1270,7 @@ main() {
echo "Waiting on the Salt Master service to be ready."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e

echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
@@ -1281,9 +1321,9 @@ main() {
if [[ $NUM_MINIONS -gt 1 ]]; then

cat << EOF

This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.

Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
@@ -1336,13 +1376,13 @@ while getopts ":b:f:y" opt; do
echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
exit 1
else
UNATTENDED=true
fi
;;
f )
ISOLOC="$OPTARG"
;;
\? )
echo "Usage: soup [-b] [-y] [-f <iso location>]"
exit 1
;;
@@ -1368,6 +1408,8 @@ Please review the following for more information about the update process and re
$DOC_BASE_URL/soup.html
https://blog.securityonion.net

WARNING: If you run soup via an SSH session and that SSH session terminates, any processes running in that session will also terminate. You should avoid leaving soup unattended, especially if the machine you are SSHing from is configured to sleep after a period of time. You might also consider using something like screen or tmux so that if your SSH session terminates, the processes will continue running on the server.

EOF

cat << EOF
@@ -15,12 +15,11 @@ Access the Security Onion web interface at https://{{ GLOBALS.url_base }}
|
||||
{%- endfor -%}
|
||||
|
||||
{%- if minions_need_restarted | length > 0 %}
|
||||
****************************************************************************************************
|
||||
* The following nodes in your Security Onion grid may need to be restarted due to package updates. *
|
||||
* If the node has already been patched, restarted and been up for less than 15 minutes, then it *
|
||||
* may not have updated it's restart_needed status yet. This will cause it to be listed below, even *
|
||||
* if it has already been restarted. This feature will be improved in the future. *
|
||||
****************************************************************************************************
|
||||
####################################################################################################
|
||||
# The following nodes in your Security Onion grid may need to be restarted due to package updates. #
|
||||
# If a node has already been patched and restarted but has been up for less than 15 minutes, #
|
||||
# then it may not have updated its status yet. #
|
||||
####################################################################################################
|
||||
|
||||
{% for minion in minions_need_restarted -%}
|
||||
{{ minion }}
|
||||
|
||||
@@ -14,7 +14,7 @@ include:
|
||||
# Install the registry container
|
||||
so-dockerregistry:
|
||||
docker_container.running:
|
||||
- image: ghcr.io/security-onion-solutions/registry:2.8.2
|
||||
- image: ghcr.io/security-onion-solutions/registry:2.8.3
|
||||
- hostname: so-registry
|
||||
- networks:
|
||||
- sobridge:
|
||||
|
||||
@@ -43,20 +43,20 @@ engines:
|
||||
- cmd.run:
|
||||
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
|
||||
- cmd.run:
|
||||
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
|
||||
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode' saltutil.kill_all_jobs
|
||||
- cmd.run:
|
||||
cmd: salt-call state.apply kafka.nodes
|
||||
- cmd.run:
|
||||
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate
|
||||
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode' state.highstate
|
||||
'KAFKA':
|
||||
to:
|
||||
'REDIS':
|
||||
- cmd.run:
|
||||
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
|
||||
- cmd.run:
|
||||
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
|
||||
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode' saltutil.kill_all_jobs
|
||||
- cmd.run:
|
||||
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate
|
||||
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver or G@role:so-searchnode' state.highstate
|
||||
- files:
|
||||
- /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
|
||||
- /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
|
||||
|
||||
@@ -3,12 +3,10 @@
|
||||
|
||||
{% if grains.os_family == 'Debian' %}
|
||||
{% set SPLITCHAR = '+' %}
|
||||
{% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %}
|
||||
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
|
||||
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
|
||||
{% else %}
|
||||
{% set SPLITCHAR = '-' %}
|
||||
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
|
||||
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
|
||||
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
|
||||
{% endif %}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
|
||||
salt:
|
||||
master:
|
||||
version: 3006.6
|
||||
version: 3006.9
|
||||
|
||||
@@ -1,16 +1,13 @@
|
||||
{% from 'salt/map.jinja' import SALTNOTHELD %}
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls in allowed_states %}
|
||||
|
||||
include:
|
||||
- salt.minion
|
||||
|
||||
{% if SALTNOTHELD == 1 %}
|
||||
hold_salt_master_package:
|
||||
module.run:
|
||||
- pkg.hold:
|
||||
- name: salt-master
|
||||
{% endif %}
|
||||
|
||||
# prior to 2.4.30 this engine ran on the manager with salt-minion
|
||||
# this has changed to running with the salt-master in 2.4.30
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# version cannot be used elsewhere in this pillar as soup is grepping for it to determine if Salt needs to be patched
|
||||
salt:
|
||||
minion:
|
||||
version: 3006.6
|
||||
version: 3006.9
|
||||
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
|
||||
service_start_delay: 30 # in seconds.
|
||||
|
||||
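As the comment notes, soup discovers the pinned version by grepping this pillar rather than importing it. A sketch of that kind of lookup follows; the file path is assumed here purely for illustration:

# Hypothetical illustration of how the pinned version can be grepped out of the defaults pillar.
grep 'version:' salt/salt/minion.defaults.yaml | awk '{print $2}'   # -> 3006.9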
@@ -2,13 +2,13 @@
|
||||
{% from 'salt/map.jinja' import UPGRADECOMMAND with context %}
|
||||
{% from 'salt/map.jinja' import SALTVERSION %}
|
||||
{% from 'salt/map.jinja' import INSTALLEDSALTVERSION %}
|
||||
{% from 'salt/map.jinja' import SALTNOTHELD %}
|
||||
{% from 'salt/map.jinja' import SALTPACKAGES %}
|
||||
{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %}
|
||||
{% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %}
|
||||
{% set service_start_delay = SALTMINION.salt.minion.service_start_delay %}
|
||||
|
||||
include:
|
||||
- salt.python_modules
|
||||
- salt
|
||||
- systemd.reload
|
||||
- repo.client
|
||||
@@ -19,15 +19,12 @@ include:
|
||||
|
||||
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
|
||||
|
||||
{% if SALTNOTHELD | int == 0 %}
|
||||
unhold_salt_packages:
|
||||
module.run:
|
||||
- pkg.unhold:
|
||||
- pkgs:
|
||||
pkg.unheld:
|
||||
- pkgs:
|
||||
{% for package in SALTPACKAGES %}
|
||||
- {{ package }}
|
||||
- {{ package }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
install_salt_minion:
|
||||
cmd.run:
|
||||
@@ -41,15 +38,12 @@ install_salt_minion:
|
||||
|
||||
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
|
||||
|
||||
{% if SALTNOTHELD | int == 1 %}
|
||||
hold_salt_packages:
|
||||
module.run:
|
||||
- pkg.hold:
|
||||
- pkgs:
|
||||
pkg.held:
|
||||
- pkgs:
|
||||
{% for package in SALTPACKAGES %}
|
||||
- {{ package }}
|
||||
- {{ package }}: {{SALTVERSION}}-0.*
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
remove_error_log_level_logfile:
|
||||
file.line:
|
||||
|
||||
BIN salt/salt/module_packages/docker/docker-7.1.0-py3-none-any.whl (new file, binary not shown)
BIN salt/salt/module_packages/docker/idna-3.7-py3-none-any.whl (new file, binary not shown)
BIN salt/salt/module_packages/docker/urllib3-2.2.2-py3-none-any.whl (new file, binary not shown)
21 salt/salt/python_modules.sls (new file)
@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

docker_module_package:
  file.recurse:
    - name: /opt/so/conf/salt/module_packages/docker
    - source: salt://salt/module_packages/docker
    - clean: True
    - makedirs: True

# fail hard on this state so that soup would be cancelled on a manager (even though salt would have already updated)
# on a non-manager, failing hard here will prevent the minion from upgrading
# we want to fail hard here to prevent the minion from upgrading and potentially being able to manage docker containers with a dependency mismatch
docker_python_module_install:
  cmd.run:
    - name: /opt/saltstack/salt/bin/python3.10 -m pip install docker --no-index --find-links=/opt/so/conf/salt/module_packages/docker/ --upgrade
    - onchanges:
      - file: docker_module_package
    - failhard: True
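A simple way to confirm the module landed in Salt's bundled interpreter after this state runs, using the paths from the state above; this is an illustrative check, not part of the state itself:

# Illustrative verification only.
/opt/saltstack/salt/bin/python3.10 -c 'import docker; print(docker.__version__)'   # expect 7.1.0 per the bundled wheel
ls /opt/so/conf/salt/module_packages/docker/                                        # wheels staged by file.recurse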
@@ -90,7 +90,7 @@ filedetectionsbackup:
|
||||
|
||||
crondetectionsruntime:
|
||||
cron.present:
|
||||
- name: /usr/sbin/so-detections-runtime-status cron
- identifier: detections-runtime-status
|
||||
- user: root
|
||||
- minute: '*/10'
|
||||
@@ -190,6 +190,14 @@ socsigmarepo:
|
||||
- group: 939
|
||||
- mode: 775
|
||||
|
||||
socsensoronirepos:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/soc/ai_summary_repos
|
||||
- user: 939
|
||||
- group: 939
|
||||
- mode: 775
|
||||
- makedirs: True
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
{% endfor %}
|
||||
|
||||
{# add all grid heavy nodes to soc.server.modules.elastic.remoteHostUrls #}
|
||||
{% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %}
|
||||
{% for node_type, minions in salt['pillar.get']('elasticsearch:nodes', {}).items() %}
|
||||
{% if node_type in ['heavynode'] %}
|
||||
{% for m in minions.keys() %}
|
||||
{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
|
||||
|
||||
@@ -96,6 +96,12 @@ soc:
|
||||
links:
|
||||
- '/#/alerts?q=rule.uuid: {:so_detection.publicId|escape} | groupby rule.name event.module* event.severity_label'
|
||||
target: ''
|
||||
- name: actionAdd
|
||||
description: actionAddHelp
|
||||
icon: fa-plus
|
||||
links:
|
||||
- '/#/config?s=soc.config.actions'
|
||||
target: ''
|
||||
eventFields:
|
||||
default:
|
||||
- soc_timestamp
|
||||
@@ -1298,6 +1304,7 @@ soc:
|
||||
maxPacketCount: 5000
|
||||
htmlDir: html
|
||||
importUploadDir: /nsm/soc/uploads
|
||||
forceUserOtp: false
|
||||
modules:
|
||||
cases: soc
|
||||
filedatastore:
|
||||
@@ -1305,7 +1312,10 @@ soc:
|
||||
kratos:
|
||||
hostUrl:
|
||||
elastalertengine:
|
||||
allowRegex: ''
|
||||
aiRepoUrl: https://github.com/Security-Onion-Solutions/securityonion-resources
|
||||
aiRepoBranch: generated-summaries-stable
|
||||
aiRepoPath: /opt/sensoroni/ai_summary_repos
|
||||
showAiSummaries: true
|
||||
autoUpdateEnabled: true
|
||||
autoEnabledSigmaRules:
|
||||
default:
|
||||
@@ -1321,7 +1331,6 @@ soc:
|
||||
communityRulesImportFrequencySeconds: 86400
|
||||
communityRulesImportErrorSeconds: 300
|
||||
failAfterConsecutiveErrorCount: 10
|
||||
denyRegex: ''
|
||||
elastAlertRulesFolder: /opt/sensoroni/elastalert
|
||||
reposFolder: /opt/sensoroni/sigma/repos
|
||||
rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint
|
||||
@@ -1360,6 +1369,8 @@ soc:
|
||||
maxLogLength: 1024
|
||||
asyncThreshold: 10
|
||||
lookupTunnelParent: true
|
||||
maxScrollSize: 10000
|
||||
bulkIndexerWorkerCount: -1
|
||||
influxdb:
|
||||
hostUrl:
|
||||
token:
|
||||
@@ -1384,7 +1395,10 @@ soc:
|
||||
userFiles:
|
||||
- rbac/users_roles
|
||||
strelkaengine:
|
||||
allowRegex: ''
|
||||
aiRepoUrl: https://github.com/Security-Onion-Solutions/securityonion-resources
|
||||
aiRepoBranch: generated-summaries-stable
|
||||
aiRepoPath: /opt/sensoroni/ai_summary_repos
|
||||
showAiSummaries: true
|
||||
autoEnabledYaraRules:
|
||||
- securityonion-yara
|
||||
autoUpdateEnabled: true
|
||||
@@ -1392,7 +1406,6 @@ soc:
|
||||
communityRulesImportErrorSeconds: 300
|
||||
failAfterConsecutiveErrorCount: 10
|
||||
compileYaraPythonScriptPath: /opt/sensoroni/yara/compile_yara.py
|
||||
denyRegex: ''
|
||||
reposFolder: /opt/sensoroni/yara/repos
|
||||
rulesRepos:
|
||||
default:
|
||||
@@ -1407,14 +1420,18 @@ soc:
|
||||
stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state
|
||||
integrityCheckFrequencySeconds: 1200
|
||||
suricataengine:
|
||||
allowRegex: ''
|
||||
aiRepoUrl: https://github.com/Security-Onion-Solutions/securityonion-resources
|
||||
aiRepoBranch: generated-summaries-stable
|
||||
aiRepoPath: /opt/sensoroni/ai_summary_repos
|
||||
showAiSummaries: true
|
||||
autoUpdateEnabled: true
|
||||
communityRulesImportFrequencySeconds: 86400
|
||||
communityRulesImportErrorSeconds: 300
|
||||
customRulesets:
|
||||
disableRegex: []
|
||||
enableRegex: []
|
||||
failAfterConsecutiveErrorCount: 10
|
||||
communityRulesFile: /nsm/rules/suricata/emerging-all.rules
|
||||
denyRegex: ''
|
||||
rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint
|
||||
stateFilePath: /opt/sensoroni/fingerprints/suricataengine.state
|
||||
integrityCheckFrequencySeconds: 1200
|
||||
@@ -2286,15 +2303,15 @@ soc:
|
||||
|
||||
alert http $EXTERNAL_NET any -> $HOME_NET any (msg:"Example Rule Title - 'example' String Detected"; content:"example"; sid:[publicId]; rev:1;)
|
||||
strelka: |
|
||||
/*
This is a YARA rule template. Replace all template values with your own values.
|
||||
The YARA rule name is the unique identifier for the rule.
|
||||
Docs: https://yara.readthedocs.io/en/stable/writingrules.html#writing-yara-rules
|
||||
*/
|
||||
rule Example // This identifier _must_ be unique
|
||||
{
|
||||
meta:
description = "Generic YARA Rule"
|
||||
author = "@SecurityOnion"
|
||||
date = "YYYY-MM-DD"
|
||||
@@ -2317,7 +2334,7 @@ soc:
|
||||
id: [publicId]
|
||||
status: 'experimental'
|
||||
description: |
|
||||
This should be a detailed description of what this Detection focuses on: what we are trying to find and why we are trying to find it.
For example, from rule 97a80ec7-0e2f-4d05-9ef4-65760e634f6b: "Detects a whoami.exe executed with the /priv command line flag instructing the tool to show all current user privileges. This is often used after a privilege escalation attempt."
|
||||
references:
|
||||
- 'https://local.invalid'
|
||||
@@ -2326,7 +2343,7 @@ soc:
|
||||
tags:
|
||||
- detection.threat_hunting
|
||||
- attack.technique_id
|
||||
logsource:
category: process_creation
|
||||
product: windows
|
||||
detection:
|
||||
|
||||
@@ -33,6 +33,7 @@ so-soc:
|
||||
- /nsm/soc/uploads:/nsm/soc/uploads:rw
|
||||
- /opt/so/log/soc/:/opt/sensoroni/logs/:rw
|
||||
- /opt/so/conf/soc/soc.json:/opt/sensoroni/sensoroni.json:ro
|
||||
- /opt/so/conf/soc/ai_summary_repos:/opt/sensoroni/ai_summary_repos:rw
|
||||
{% if SOCMERGED.telemetryEnabled and not GLOBALS.airgap %}
|
||||
- /opt/so/conf/soc/analytics.js:/opt/sensoroni/html/js/analytics.js:ro
|
||||
{% endif %}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
## Getting Started
|
||||
|
||||
New to Security Onion? Click the menu in the upper-right corner and you'll find links for [Help](/docs/) and a [Cheat Sheet](/docs/cheatsheet.pdf) that will help you best utilize Security Onion to hunt for evil! In addition, check out our free Security Onion Essentials online course, available on our [Training](https://securityonionsolutions.com/training) website.
|
||||
New to Security Onion? Click the menu in the upper-right corner and you'll find links for [Help](/docs/) and a [Cheat Sheet](/docs/cheatsheet.pdf) that will help you best utilize Security Onion to hunt for evil! In addition, check out our free Security Onion Essentials online course, available on our [Training](https://securityonion.com/training) website.
|
||||
|
||||
If you're ready to dive in, take a look at the [Alerts](/#/alerts) interface to see what Security Onion has detected so far. If you find any false positives, then you can tune those in [Detections](/#/detections).
|
||||
|
||||
@@ -20,13 +20,17 @@ For more coverage of your enterprise, you can deploy the Elastic Agent to endpoi
|
||||
|
||||
To see all the latest features and fixes in this version of Security Onion, click the upper-right menu and then click the [What's New](/docs/release-notes.html) link.
|
||||
|
||||
## Security Onion Pro
|
||||
|
||||
Need enterprise features and premium support? Check out [Security Onion Pro](https://securityonion.com/pro/)!
|
||||
|
||||
## Enterprise Appliances
|
||||
|
||||
Want the best hardware for your enterprise deployment? Check out our [enterprise appliances](https://securityonionsolutions.com/hardware/)!
|
||||
Want the best hardware for your enterprise deployment? Check out our [enterprise appliances](https://securityonion.com/hardware/)!
|
||||
|
||||
## Premium Support
|
||||
|
||||
Experiencing difficulties and need priority support or remote assistance? We offer a [premium support plan](https://securityonionsolutions.com/support/) to assist corporate, educational, and government organizations.
|
||||
Experiencing difficulties and need priority support or remote assistance? We offer a [premium support plan](https://securityonion.com/support/) to assist corporate, educational, and government organizations.
|
||||
|
||||
## Customize This Space
|
||||
|
||||
|
||||
@@ -5,9 +5,9 @@
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'soc/defaults.map.jinja' import SOCDEFAULTS with context %}
|
||||
{% from 'logstash/map.jinja' import LOGSTASH_NODES %}
|
||||
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
|
||||
{% from 'manager/map.jinja' import MANAGERMERGED %}
|
||||
{% set DOCKER_EXTRA_HOSTS = LOGSTASH_NODES %}
|
||||
{% set DOCKER_EXTRA_HOSTS = ELASTICSEARCH_NODES %}
|
||||
{% do DOCKER_EXTRA_HOSTS.append({GLOBALS.influxdb_host:pillar.node_data[GLOBALS.influxdb_host].ip}) %}
|
||||
|
||||
{% set SOCMERGED = salt['pillar.get']('soc', SOCDEFAULTS, merge=true) %}
|
||||
|
||||
@@ -81,20 +81,133 @@ soc:
|
||||
description: Maximum number of packets to show in the PCAP viewer. Larger values can cause more resource utilization on both the SOC server and the browser.
|
||||
global: True
|
||||
advanced: True
|
||||
forceUserOtp:
|
||||
title: Require TOTP
|
||||
description: Require all users to enable Time-based One Time Passwords (MFA) upon login to SOC.
|
||||
global: True
|
||||
modules:
|
||||
elastalertengine:
|
||||
additionalAlerters:
|
||||
title: Additional Alerters
|
||||
description: Specify additional alerters to enable for all Sigma rules, one alerter name per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. Note that the configuration parameters for these alerters must be provided in the ElastAlert configuration section. Filter for 'Alerter' to find this related setting. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
helpLink: sigma.html
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
allowRegex:
|
||||
description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.'
|
||||
aiRepoUrl:
|
||||
description: URL to the AI repository. This is used to pull in AI models for use in ElastAlert rules.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: sigma.html
|
||||
aiRepoBranch:
|
||||
description: The branch to pull from the AI repository. Leaving this blank will pull the default branch.
|
||||
global: True
|
||||
advanced: True
|
||||
aiRepoPath:
|
||||
description: Path to the AI repository. This is used to pull in AI models for use in ElastAlert rules.
|
||||
global: True
|
||||
advanced: True
|
||||
showAiSummaries:
|
||||
description: Show AI summaries for ElastAlert rules.
|
||||
global: True
|
||||
additionalAlerters:
|
||||
title: "Notifications: Sev 0/Default Alerters"
|
||||
description: "Specify default alerters to enable for outbound notifications. These alerters will be used unless overridden by higher severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
|
||||
global: True
|
||||
helpLink: notifications.html
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
additionalSev0AlertersParams:
|
||||
title: "Notifications: Sev 0/Default Parameters"
|
||||
description: Optional configuration parameters for default alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
multiline: True
|
||||
syntax: yaml
|
||||
helpLink: notifications.html
|
||||
forcedType: string
|
||||
additionalSev1Alerters:
|
||||
title: "Notifications: Sev 1/Informational Alerters"
|
||||
description: "Specify specific alerters to use when alerting at the info severity level or higher. These alerters will be used unless overridden by higher severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
|
||||
global: True
|
||||
helpLink: notifications.html
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
additionalSev1AlertersParams:
|
||||
title: "Notifications: Sev 1/Informational Parameters"
|
||||
description: Optional configuration parameters for informational severity alerters. Info level is less severe than 'Low Severity'. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
multiline: True
|
||||
syntax: yaml
|
||||
helpLink: notifications.html
|
||||
forcedType: string
|
||||
additionalSev2Alerters:
|
||||
title: "Notifications: Sev 2/Low Alerters"
|
||||
description: "Specify specific alerters to use when alerting at the low severity level or higher. These alerters will be used unless overridden by higher severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
|
||||
global: True
|
||||
helpLink: notifications.html
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
additionalSev2AlertersParams:
|
||||
title: "Notifications: Sev 2/Low Parameters"
|
||||
description: Optional configuration parameters for low severity alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
multiline: True
|
||||
syntax: yaml
|
||||
helpLink: notifications.html
|
||||
forcedType: string
|
||||
additionalSev3Alerters:
|
||||
title: "Notifications: Sev 3/Medium Alerters"
|
||||
description: "Specify specific alerters to use when alerting at the medium severity level or higher. These alerters will be used unless overridden by higher severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
|
||||
global: True
|
||||
helpLink: notifications.html
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
additionalSev3AlertersParams:
|
||||
title: "Notifications: Sev 3/Medium Parameters"
|
||||
description: Optional configuration parameters for medium severity alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
multiline: True
|
||||
syntax: yaml
|
||||
helpLink: notifications.html
|
||||
forcedType: string
|
||||
additionalSev4Alerters:
|
||||
title: "Notifications: Sev 4/High Alerters"
|
||||
description: "Specify specific alerters to use when alerting at the high severity level or critical severity level. These alerters will be used unless overridden by critical severity alerter settings. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
|
||||
global: True
|
||||
helpLink: notifications.html
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
additionalSev4AlertersParams:
|
||||
title: "Notifications: Sev 4/High Parameters"
|
||||
description: Optional configuration parameters for high severity alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
multiline: True
|
||||
syntax: yaml
|
||||
helpLink: notifications.html
|
||||
forcedType: string
|
||||
additionalSev5Alerters:
|
||||
title: "Notifications: Sev 5/Critical Alerters"
|
||||
description: "Specify specific alerters to use when alerting at the critical severity level. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
|
||||
global: True
|
||||
helpLink: notifications.html
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
additionalSev5AlertersParams:
|
||||
title: "Notifications: Sev 5/Critical Parameters"
|
||||
description: Optional configuration parameters for critical severity alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
multiline: True
|
||||
syntax: yaml
|
||||
helpLink: notifications.html
|
||||
forcedType: string
|
||||
additionalUserDefinedNotifications:
|
||||
customAlerters:
|
||||
description: "Specify custom notification alerters to use when the Sigma rule contains the following tag: so.alerters.customAlerters. This setting can be duplicated to create new custom alerter configurations. Specify one alerter name (Ex: 'email') per line. Alerters refers to ElastAlert 2 alerters, as documented at https://elastalert2.readthedocs.io. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
|
||||
global: True
|
||||
helpLink: notifications.html
|
||||
forcedType: "[]string"
|
||||
duplicates: True
|
||||
multiline: True
|
||||
customAlertersParams:
|
||||
description: "Optional configuration parameters for custom notification alerters, used when the Sigma rule contains the following tag: so.params.customAlertersParams. This setting can be duplicated to create new custom alerter configurations. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key."
|
||||
global: True
|
||||
multiline: True
|
||||
syntax: yaml
|
||||
helpLink: notifications.html
|
||||
duplicates: True
|
||||
forcedType: string
|
||||
autoEnabledSigmaRules:
|
||||
default: &autoEnabledSigmaRules
|
||||
description: 'Sigma rules to automatically enable on initial import. Format is $Ruleset+$Level - for example, for the core community ruleset and critical level rules: core+critical. These will be applied based on role if defined and default if not.'
|
||||
@@ -103,11 +216,6 @@ soc:
|
||||
helpLink: sigma.html
|
||||
so-eval: *autoEnabledSigmaRules
|
||||
so-import: *autoEnabledSigmaRules
|
||||
denyRegex:
|
||||
description: 'Regex used to filter imported Sigma rules. Deny regex takes precedence over the Allow regex setting.'
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: sigma.html
|
||||
communityRulesImportFrequencySeconds:
|
||||
description: 'How often to check for new Sigma rules (in seconds). This applies to both Community Rule Packages and any configured Git repos.'
|
||||
global: True
|
||||
@@ -174,6 +282,10 @@ soc:
|
||||
lookupTunnelParent:
|
||||
description: When true, if a pivoted event appears to be encapsulated, such as in a VXLAN packet, then SOC will pivot to the VXLAN packet stream. When false, SOC will attempt to pivot to the encapsulated packet stream itself, but at the risk that it may be unable to locate it in the stored PCAP data.
|
||||
global: True
|
||||
maxScrollSize:
|
||||
description: The maximum number of documents to request in a single Elasticsearch scroll request.
|
||||
bulkIndexWorkerCount:
|
||||
description: The number of worker threads to use when bulk indexing data into Elasticsearch. A value below 1 will default to the number of CPUs available.
|
||||
sostatus:
|
||||
refreshIntervalMs:
|
||||
description: Duration (in milliseconds) between refreshes of the grid status. Shortening this duration may not have expected results, as the backend systems feeding this sostatus data will continue their updates as scheduled.
|
||||
@@ -195,21 +307,26 @@ soc:
|
||||
advanced: True
|
||||
forcedType: int
|
||||
strelkaengine:
|
||||
allowRegex:
|
||||
description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.'
|
||||
aiRepoUrl:
|
||||
description: URL to the AI repository. This is used to pull in AI models for use in Strelka rules.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: yara.html
|
||||
aiRepoBranch:
|
||||
description: The branch to pull from the AI repository. Leaving this blank will pull the default branch.
|
||||
global: True
|
||||
advanced: True
|
||||
aiRepoPath:
|
||||
description: Path to the AI repository. This is used to pull in AI models for use in Strelka rules.
|
||||
global: True
|
||||
advanced: True
|
||||
showAiSummaries:
|
||||
description: Show AI summaries for Strelka rules.
|
||||
global: True
|
||||
autoEnabledYaraRules:
|
||||
description: 'YARA rules to automatically enable on initial import. Format is $Ruleset - for example, for the default shipped ruleset: securityonion-yara'
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: sigma.html
|
||||
denyRegex:
|
||||
description: 'Regex used to filter imported YARA rules. Deny regex takes precedence over the Allow regex setting.'
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: yara.html
|
||||
communityRulesImportFrequencySeconds:
|
||||
description: 'How often to check for new YARA rules (in seconds). This applies to both Community Rules and any configured Git repos.'
|
||||
global: True
|
||||
@@ -228,21 +345,34 @@ soc:
|
||||
helpLink: yara.html
|
||||
airgap: *serulesRepos
|
||||
suricataengine:
|
||||
allowRegex:
|
||||
description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.'
|
||||
aiRepoUrl:
|
||||
description: URL to the AI repository. This is used to pull in AI models for use in Suricata rules.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: suricata.html
|
||||
denyRegex:
|
||||
description: 'Regex used to filter imported Suricata rules. Deny regex takes precedence over the Allow regex setting.'
|
||||
aiRepoBranch:
|
||||
description: The branch to pull from the AI repository. Leaving this blank will pull the default branch.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: suricata.html
|
||||
aiRepoPath:
|
||||
description: Path to the AI repository. This is used to pull in AI models for use in Suricata rules.
|
||||
global: True
|
||||
advanced: True
|
||||
showAiSummaries:
|
||||
description: Show AI summaries for Suricata rules.
|
||||
global: True
|
||||
communityRulesImportFrequencySeconds:
|
||||
description: 'How often to check for new Suricata rules (in seconds).'
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: suricata.html
|
||||
disableRegex:
|
||||
description: A list of regular expressions used to automatically disable rules that match any of them. Each regular expression is tested against the rule's content.
|
||||
global: True
|
||||
forcedType: "[]string"
|
||||
enableRegex:
|
||||
description: A list of regular expressions used to automatically enable rules that match any of them. Each regular expression is tested against the rule's content. Takes priority over disableRegex matches.
|
||||
global: True
|
||||
forcedType: "[]string"
|
||||
integrityCheckFrequencySeconds:
|
||||
description: 'How often the Suricata integrity checker runs (in seconds). This verifies the integrity of deployed rules.'
|
||||
global: True
|
||||
|
||||
@@ -17,8 +17,6 @@
|
||||
{% set COMMONNAME = GLOBALS.manager %}
|
||||
{% endif %}
|
||||
|
||||
{% set kafka_password = salt['pillar.get']('kafka:password') %}
|
||||
|
||||
{% if grains.id.split('_')|last in ['manager', 'managersearch', 'eval', 'standalone', 'import'] %}
|
||||
include:
|
||||
- ca
|
||||
@@ -666,7 +664,6 @@ elastickeyperms:
|
||||
{%- endif %}
|
||||
|
||||
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
|
||||
|
||||
elasticfleet_kafka_key:
|
||||
x509.private_key_managed:
|
||||
- name: /etc/pki/elasticfleet-kafka.key
|
||||
@@ -696,17 +693,13 @@ elasticfleet_kafka_crt:
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
cmd.run:
|
||||
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-kafka.key -topk8 -out /etc/pki/elasticfleet-kafka.p8 -nocrypt"
|
||||
- onchanges:
|
||||
- x509: elasticfleet_kafka_key
|
||||
|
||||
elasticfleet_kafka_cert_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/elasticfleet-kafka.crt
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- user: 947
|
||||
- group: 939
|
||||
|
||||
elasticfleet_kafka_key_perms:
|
||||
@@ -714,187 +707,8 @@ elasticfleet_kafka_key_perms:
|
||||
- replace: False
|
||||
- name: /etc/pki/elasticfleet-kafka.key
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- user: 947
|
||||
- group: 939
|
||||
|
||||
elasticfleet_kafka_pkcs8_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/elasticfleet-kafka.p8
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_client_key:
|
||||
x509.private_key_managed:
|
||||
- name: /etc/pki/kafka-client.key
|
||||
- keysize: 4096
|
||||
- backup: True
|
||||
- new: True
|
||||
{% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%}
|
||||
- prereq:
|
||||
- x509: /etc/pki/kafka-client.crt
|
||||
{%- endif %}
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
|
||||
kafka_client_crt:
|
||||
x509.certificate_managed:
|
||||
- name: /etc/pki/kafka-client.crt
|
||||
- ca_server: {{ ca_server }}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||
- signing_policy: kafka
|
||||
- private_key: /etc/pki/kafka-client.key
|
||||
- CN: {{ GLOBALS.hostname }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- timeout: 30
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
|
||||
kafka_client_key_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-client.key
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_client_crt_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-client.crt
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% if grains['role'] in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
|
||||
|
||||
kafka_key:
|
||||
x509.private_key_managed:
|
||||
- name: /etc/pki/kafka.key
|
||||
- keysize: 4096
|
||||
- backup: True
|
||||
- new: True
|
||||
{% if salt['file.file_exists']('/etc/pki/kafka.key') -%}
|
||||
- prereq:
|
||||
- x509: /etc/pki/kafka.crt
|
||||
{%- endif %}
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
|
||||
kafka_crt:
|
||||
x509.certificate_managed:
|
||||
- name: /etc/pki/kafka.crt
|
||||
- ca_server: {{ ca_server }}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||
- signing_policy: kafka
|
||||
- private_key: /etc/pki/kafka.key
|
||||
- CN: {{ GLOBALS.hostname }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- timeout: 30
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
cmd.run:
|
||||
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}"
|
||||
- onchanges:
|
||||
- x509: /etc/pki/kafka.key
|
||||
kafka_key_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka.key
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_crt_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka.crt
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_pkcs12_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka.p12
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
{% endif %}
|
||||
|
||||
# Standalone needs kafka-logstash for automated testing. Searchnode/managersearch need it for logstash to consume from Kafka.
|
||||
# Manager will have cert, but be unused until a pipeline is created and logstash enabled.
|
||||
{% if grains['role'] in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
|
||||
kafka_logstash_key:
|
||||
x509.private_key_managed:
|
||||
- name: /etc/pki/kafka-logstash.key
|
||||
- keysize: 4096
|
||||
- backup: True
|
||||
- new: True
|
||||
{% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%}
|
||||
- prereq:
|
||||
- x509: /etc/pki/kafka-logstash.crt
|
||||
{%- endif %}
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
|
||||
kafka_logstash_crt:
|
||||
x509.certificate_managed:
|
||||
- name: /etc/pki/kafka-logstash.crt
|
||||
- ca_server: {{ ca_server }}
|
||||
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
|
||||
- signing_policy: kafka
|
||||
- private_key: /etc/pki/kafka-logstash.key
|
||||
- CN: {{ GLOBALS.hostname }}
|
||||
- days_remaining: 0
|
||||
- days_valid: 820
|
||||
- backup: True
|
||||
- timeout: 30
|
||||
- retry:
|
||||
attempts: 5
|
||||
interval: 30
|
||||
cmd.run:
|
||||
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:{{ kafka_password }}"
|
||||
- onchanges:
|
||||
- x509: /etc/pki/kafka-logstash.key
|
||||
|
||||
kafka_logstash_key_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-logstash.key
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_logstash_crt_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-logstash.crt
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 939
|
||||
|
||||
kafka_logstash_pkcs12_perms:
|
||||
file.managed:
|
||||
- replace: False
|
||||
- name: /etc/pki/kafka-logstash.p12
|
||||
- mode: 640
|
||||
- user: 960
|
||||
- group: 931
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
@@ -101,20 +101,20 @@
|
||||
|
||||
{# change address-groups vars from list to comma-separated string #}
|
||||
{% for k, v in SURICATAMERGED.config.vars['address-groups'].items() %}
|
||||
{% if v is string %}
|
||||
{% do SURICATAMERGED.config.vars['address-groups'].update({k: '[' ~ v ~ ']'}) %}
|
||||
{# if address-group value is a list #}
|
||||
{% if v is iterable and (v is not string and v is not mapping and v | length > 1) %}
|
||||
{% elif v is iterable and v is not mapping %}
|
||||
{% do SURICATAMERGED.config.vars['address-groups'].update({k: '[' ~ v | join(',') ~ ']'}) %}
|
||||
{% else %}
|
||||
{% do SURICATAMERGED.config.vars['address-groups'].update({k: v[0]}) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{# change port-groups vars from list to comma-separated string #}
|
||||
{% for k, v in SURICATAMERGED.config.vars['port-groups'].items() %}
|
||||
{% if v is string %}
|
||||
{% do SURICATAMERGED.config.vars['port-groups'].update({k: '[' ~ v ~ ']'}) %}
|
||||
{# if address-group value is a list #}
|
||||
{% if v is iterable and (v is not string and v is not mapping and v | length > 1) %}
|
||||
{% elif v is iterable and v is not mapping %}
|
||||
{% do SURICATAMERGED.config.vars['port-groups'].update({k: '[' ~ v | join(',') ~ ']'}) %}
|
||||
{% else %}
|
||||
{% do SURICATAMERGED.config.vars['port-groups'].update({k: v[0]}) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
@@ -154,12 +154,14 @@ suricata:
|
||||
description: Assign a list of hosts, or networks, using CIDR notation, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable.
|
||||
regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$
|
||||
regexFailureMessage: You must enter a valid IP address or CIDR.
|
||||
helpLink: suricata.html
|
||||
forcedType: "[]string"
|
||||
duplicates: True
|
||||
helpLink: suricata.html
|
||||
EXTERNAL_NET: &suriaddressgroup
|
||||
description: Assign a list of hosts, or networks, or other customization, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable.
|
||||
helpLink: suricata.html
|
||||
forcedType: "[]string"
|
||||
duplicates: True
|
||||
helpLink: suricata.html
|
||||
HTTP_SERVERS: *suriaddressgroup
|
||||
SMTP_SERVERS: *suriaddressgroup
|
||||
SQL_SERVERS: *suriaddressgroup
|
||||
@@ -176,8 +178,9 @@ suricata:
|
||||
port-groups:
|
||||
HTTP_PORTS: &suriportgroup
|
||||
description: Assign a list of network port numbers to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable.
|
||||
helpLink: suricata.html
|
||||
forcedType: "[]string"
|
||||
duplicates: True
|
||||
helpLink: suricata.html
|
||||
SHELLCODE_PORTS: *suriportgroup
|
||||
ORACLE_PORTS: *suriportgroup
|
||||
SSH_PORTS: *suriportgroup
|
||||
|
||||
@@ -155,6 +155,7 @@ base:
|
||||
- nginx
|
||||
- elasticfleet.install_agent_grid
|
||||
- stig
|
||||
- kafka
|
||||
|
||||
'*_managersearch and G@saltversion:{{saltversion}}':
|
||||
- match: compound
|
||||
@@ -184,6 +185,7 @@ base:
|
||||
- utility
|
||||
- elasticfleet
|
||||
- stig
|
||||
- kafka
|
||||
|
||||
'*_heavynode and G@saltversion:{{saltversion}}':
|
||||
- match: compound
|
||||
|
||||
Additional binary files in this diff are not shown.
Some files were not shown because too many files have changed in this diff.