Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-19 15:33:06 +01:00)
Compare commits
563 Commits
2.4.70-20240529 ... 2.4.100-20240829
Commit SHA1s:

a9f2dfc4b8, b7e047d149, f69137b38d, 9746f6e5e2, 89a1e2500e, 394ce29ea3, f19a35ff06, 8943e88ca8,
18774aa0a7, af80a78406, 6043da4424, 75086bac7f, 726df310ee, b952728b2c, 1cac2ff1d4, a93c77a1cc,
dd09f5b153, 29f996de66, c575e02fbb, e96a0108c3, e86fce692c, 8d35c7c139, 0a5725a62e, 1c6f5126db,
1ec5e3bf2a, d29727c869, eabb894580, 96339f0de6, d7e3e134a5, dfb0ff7a98, 48f1e24bf5, cf47508185,
2a024039bf, 212cc478de, 88ea60df2a, c1b7232a88, 04577a48be, 18ef37a2d0, 4108e67178, ff479de7bd,
4afac201b9, c30537fe6a, 1ed73b6f8e, f01825166d, 07f8bda27e, e3ecc9d4be, ca209ed54c, df6ff027b5,
e772497e12, 205bbd9c61, 224bc6b429, dc197f6a5c, f182833a8d, 61ab1f1ef2, dea582f24a, b860bf753a,
b5690f6879, a39ad55578, 4c276d1211, 5f74b1b730, b9040eb0de, ab63d5dbdb, f233f13637, c8a8236401,
f5603b1274, 1d27fcc50e, dd2926201d, ebcef8adbd, ff14217d38, 46596f01fa, c1388a68f0, 374da11037,
caa8d9ecb0, 02c7de6b1a, c71b9f6e8f, 8c1feccbe0, 5ee15c8b41, 5328f55322, 712f904c43, ccd7d86302,
fc89604982, 09f7329a21, cfd6676583, 3713ee9d93, 009c8d55c3, c0c01f0d17, 2fe5dccbb4, c83a143eef,
56ef2a4e1c, c36e8abc19, e76293acdb, 5bdb4ed51b, aaf5d76071, d9a696a411, 76ab4c92f0, 60beaf51bc,
9ab17ff79c, 1a363790a0, d488bb6393, 114ad779b4, 49d2ac2b13, 9a2252ed3f, 9264a03dbc, fb2a42a9af,
63531cdbb6, bae348bef7, bd223d8643, 3fa6c72620, 2b90bdc86a, 6831b72804, 5e12b928d9, 0453f51e64,
9594e4115c, 201e14f287, d833bd0d55, 46eeb014af, 8e7a2cf353, 2c528811cc, 3130b56d58, b466d83625,
6d008546f1, c60b14e2e7, c753a7cffa, 5cba4d7d9b, 685df9e5ea, ef5a42cf40, 45ab6c7309, 1b54a109d5,
945d04a510, 658db27a46, 3e248da14d, ed7f8dbf1d, d6af3aab6d, 0cb067f6f2, ccf88fa62b, 20f915f649,
f447b6b698, 66b087f12f, f2ad4c40e6, 8538f2eca2, c55fa6dc6a, 17f37750e5, e789c17bc3, 6f44d39b18,
dd85249781, bdba621442, 034315ed85, 224c668c31, 2e17e93cfe, 7dfb75ba6b, af0425b8f1, 6cf0a0bb42,
d97400e6f5, cf1335dd84, be74449fb9, 45b2413175, 022df966c7, 92385d652e, 4478d7b55a, 612716ee69,
f78a5d1a78, 2d0de87530, 18df491f7e, cee6ee7a2a, 6d18177f98, c0bb395571, f051ddc7f0, 72ad49ed12,
d11f4ef9ba, 03ca7977a0, 91b2e7d400, 34c3a58efe, a867557f54, b814f32e0a, 2df44721d0, d0565baaa3,
38e7da1334, 1b623c5c7a, 542a116b8c, e7b6496f98, 3991c7b5fe, 678b232c24, fbd0dbd048, 1df19faf5c,
8ec5794833, bf07d56da6, cdbffa2323, 55469ebd24, 4e81860a13, a23789287e, fe1824aedd, e58b2c45dd,
5d322ebc0b, 7ea8d5efd0, 4182ff66a0, ff29d9ca51, 4a88dedcb8, cfe5c1d76a, ebf5159c95, d432019ad9,
0d8fd42be3, d5faf535c3, 8e1edd1d91, d791b23838, 0db0754ee5, 1f5a990b1e, 7a2f01be53, dadb0db8f3,
dfd8ac3626, 9716e09b83, 669f68ad88, 32af2d8436, 24e945eee4, 8615e5d5ea, 2dd5ff4333, 6a396ec1aa,
34f558c023, 9504f0885a, ef59678441, c6f6811f47, ce8f9fe024, 40b7999786, 69be03f86a, 8dc8092241,
578c6c567f, 662df1208d, 745b6775f1, 176aaa8f3d, 4d499be1a8, c27225d91f, 1b47d5c622, 32d7927a49,
861630681c, 9d725f2b0b, 132263ac1a, 92a847e3bd, 75bbc41d38, 7716f4aff8, 8eb6dcc5b7, 847638442b,
5743189eef, 81d874c6ae, bfe8a3a01b, 71ed9204ff, 222ebbdec1, 260d4e44bc, 0c5b3f7c1c, feee80cad9,
5f69456e22, e59d124c82, 13d4738e8f, abdfbba32a, 7d0a961482, 0f226cc08e, cfcfc6819f, fe4e2a9540,
492554d951, dfd5e95c93, 50f0c43212, 7fe8715bce, f837ea944a, c2d43e5d22, 51bb4837f5, caec424e44,
156176c628, 81b4c4e2c0, d4107dc60a, d34605a512, af5e7cd72c, 93378e92e6, 81ce762250, cb727bf48d,
9a0bad88cc, 680e84851b, ea771ed21b, c332cd777c, 9fce85c988, 6141c7a849, bf91030204, 9577c3f59d,
77dedc575e, 0295b8d658, 6a9d78fa7c, b84521cdd2, ff4679ec08, c5ce7102e8, 70c001e22b, f1dc22a200,
aae1b69093, 469ca44016, 81fcd68e9b, 8781419b4a, 2eea671857, 73acfbf864, ae0e994461, 07b9011636,
bc2b3b7f8f, ea02a2b868, ba3a6cbe87, 268dcbe00b, 6be97f13d0, 95d6c93a07, a2bb220043, 911d6dcce1,
5f6a9850eb, de18bf06c3, 73473d671d, 3fbab7c3af, 521cccaed6, 35da3408dc, c03096e806, 2afc947d6c,
076da649cf, 55f8303dc2, 93ced0959c, 6f13fa50bf, 3bface12e0, b584c8e353, 6caf87df2d, 4d1f2c2bc1,
0b1175b46c, 4e50dabc56, ce45a5926a, c540a4f257, 7af94c172f, 7556587e35, a0030b27e2, 8080e05444,
af11879545, c89f1c9d95, b7ac599a42, 8363877c66, 4bcb4b5b9c, 68302e14b9, c1abc7a7f1, 484717d57d,
b91c608fcf, 8f8ece2b34, 9b5c1c01e9, 816a1d446e, 19bfd5beca, 9ac7e051b3, 80b1d51f76, 6340ebb36d,
70721afa51, 9c31622598, f372b0907b, fac96e0b08, 2bc53f9868, e8106befe9, 83412b813f, b56d497543,
dd40962288, b7eebad2a5, 8f8698fd02, 092f716f12, c38f48c7f2, 98837bc379, 0f243bb6ec, 88fc1bbe32,
d5ef0e5744, 2ecac38f6d, e90557d7dc, 628893fd5b, a81e4c3362, ca7b89c308, 03335cc015, 08557ae287,
08d2a6242d, 4b481bd405, 0b1e3b2a7f, dbd9873450, c6d0a17669, adeab10f6d, 824f852ed7, 284c1be85f,
7ad6baf483, f1638faa3a, dea786abfa, f96b82b112, 95fe11c6b4, f2f688b9b8, 0139e18271, 657995d744,
4057238185, fb07ff65c9, dbc56ffee7, ee696be51d, 5d3fd3d389, fa063722e1, f5cc35509b, d39c8fae54,
d3b81babec, f35f6bd4c8, d5cfef94a3, f37f5ba97b, 42818a9950, e85c3e5b27, a39c88c7b4, 73ebf5256a,
6d31cd2a41, 5600fed9c4, 6920b77b4a, ccd6b3914c, c4723263a4, 4581a46529, 33a2c5dcd8, f6a8a21f94,
ff5773c837, 66f8084916, a2467d0418, 3b0339a9b3, fb1d4fdd3c, 56a16539ae, c0b2cf7388, d9c58d9333,
ef3a52468f, c88b731793, 2e85a28c02, 964fef1aab, 1a832fa0a5, 75bdc92bbf, a8c231ad8c, f396247838,
e3ea4776c7, 37a928b065, 85c269e697, 6e70268ab9, fb8929ea37, 5d9c0dd8b5, debf093c54, 00b5a5cc0c,
dbb99d0367, 7702f05756, 2c635bce62, 48713a4e7b, e831354401, 55c5ea5c4c, 1fd5165079, 949cea95f4,
12762e08ef, 62bdb2627a, 386be4e746, d9ec556061, 876d860488, 59097070ef, 77b5aa4369, 0d7c331ff0,
1c1a1a1d3f, 47efcfd6e2, 15a0b959aa, fcb6a47e8c, b5f656ae58, 382cd24a57, b1beb617b3, 91f8b1fef7,
2ad87bf1fe, eca2a4a9c8, dff609d829, e960ae66a3, 093cbc5ebc, f663ef8c16, de9f6425f9, 47ced60243,
58ebbfba20, e164d15ec6, 3efdb4e532, de0af58cf8, 84abfa6881, 6b60e85a33, 63f3e23e2b, eb1249618b,
cef9bb1487, bb49944b96, fcc4050f86, 9c83a52c6d, a6e8b25969, 529bc01d69, 11055b1d32, fd9a91420d,
529c8d7cf2, 086ebe1a7c, 29c964cca1, 36573d6005, aa0c589361, 685b80e519, 5a401af1fd, 25d63f7516,
6c5e0579cf, 4ac04a1a46, 746128e37b, fe81ffaf78, 1f6eb9cdc3, 5cc358de4e, 610dd2c08d, 506bbd314d,
4caa6a10b5, 665b7197a6, 4b79623ce3, c4994a208b, eedea2ca88, de6ea29e3b, bb983d4ba2, c014508519,
fcfbb1e857, 911ee579a9, a6ff92b099, d73ba7dd3e, 04ddcd5c93, af29ae1968, fbd3cff90d, 0ed9894b7e,
a54a72c269, f514e5e9bb, 3955587372, 6b28dc72e8, ca7253a589, af53dcda1b, d3bd56b131, e9e61ea2d8,
86b984001d, fa7f8104c8, bd5fe43285, d38051e806, daa5342986, c48436ccbf, 7aa00faa6c, 6217a7b9a9,
d67ebabc95, 65274e89d7, 721e04f793, 433309ef1a, 735cfb4c29, 6202090836, 436cbc1f06, 40b08d737c,
4c5b42b898, 7a6b72ebac, 1b8584d4bb, 13105c4ab3, dc27bbb01d, b863060df1, 18f95e867f, ed6137a76a,
c3f02a698e, db106f8ca1, 8e47cc73a5, 639bf05081, 4e142e0212, c9bf1c86c6, 82830c8173, 7f5741c43b,
643d4831c1, b032eed22a, 1b49c8540e, f7534a0ae3, 780ad9eb10, e25bc8efe4, 26abe90671, 446f1ffdf5,
2168698595, 8cf29682bb, 86dc7cc804
.github/.gitleaks.toml (vendored; 2 lines changed)
@@ -536,7 +536,7 @@ secretGroup = 4
 [allowlist]
 description = "global allow lists"
-regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''']
+regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''', '''ssl_.*password''']
 paths = [
     '''gitleaks.toml''',
     '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
@@ -1,17 +1,17 @@
-### 2.4.70-20240529 ISO image released on 2024/05/29
+### 2.4.100-20240829 ISO image released on 2024/08/29


 ### Download and Verify

-2.4.70-20240529 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
+2.4.100-20240829 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.100-20240829.iso

-MD5: 8FCCF31C2470D1ABA380AF196B611DEC
-SHA1: EE5E8F8C14819E7A1FE423E6920531A97F39600B
-SHA256: EF5E781D50D50660F452ADC54FD4911296ECBECED7879FA8E04687337CA89BEC
+MD5: 377586C143FABD662DB414DEA49D46B7
+SHA1: 69D4B94522789AF47075A9FF1354B069679AC366
+SHA256: 52FBA5C8762B8DCF2945AD2837B3A19E63ADCC209AB510D7FD0F86AE713AA153

 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.100-20240829.iso.sig

 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,27 +25,29 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.

 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.100-20240829.iso.sig
 ```

 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.100-20240829.iso
 ```

 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.70-20240529.iso.sig securityonion-2.4.70-20240529.iso
+gpg --verify securityonion-2.4.100-20240829.iso.sig securityonion-2.4.100-20240829.iso
 ```

 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Wed 29 May 2024 11:40:59 AM EDT using RSA key ID FE507013
+gpg: Signature made Thu 29 Aug 2024 12:02:55 PM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
 ```

 If it fails to verify, try downloading again. If it still fails to verify, try downloading from another computer or another network.

 Once you've verified the ISO image, you're ready to proceed to our Installation guide:
 https://docs.securityonion.net/en/2.4/installation.html
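The verification steps above can also be strung together; a minimal end-to-end sketch, using only the URLs and version string from this release note:
```
set -e
VER="2.4.100-20240829"
# Import the signing key, then fetch the signature and the ISO
wget "https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS" -O - | gpg --import -
wget "https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-${VER}.iso.sig"
wget "https://download.securityonion.net/file/securityonion/securityonion-${VER}.iso"
# gpg exits non-zero if the signature does not verify
gpg --verify "securityonion-${VER}.iso.sig" "securityonion-${VER}.iso" \
  || { echo "Verification failed; re-download and try again" >&2; exit 1; }
```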
@@ -5,9 +5,11 @@
 | Version | Supported |
 | ------- | ------------------ |
 | 2.4.x | :white_check_mark: |
-| 2.3.x | :white_check_mark: |
+| 2.3.x | :x: |
 | 16.04.x | :x: |

+Security Onion 2.3 has reached End Of Life and is no longer supported.
+
 Security Onion 16.04 has reached End Of Life and is no longer supported.

 ## Reporting a Vulnerability
pillar/elasticsearch/nodes.sls (new file, 34 lines)
@@ -0,0 +1,34 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
    'mine.get',
    tgt='elasticsearch:enabled:true',
    fun='network.ip_addrs',
    tgt_type='pillar') | dictsort()
%}

# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}


elasticsearch:
  nodes:
{% for node_type, values in node_types.items() %}
    {{node_type}}:
{% for hostname, ip in values.items() %}
      {{hostname}}:
        ip: {{ip}}
{% endfor %}
{% endfor %}
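Once the mine has been populated, the rendered pillar can be spot-checked on the manager; the hostnames and addresses below are illustrative, not taken from this repo:
```
salt-call pillar.get elasticsearch:nodes
# Hypothetical output for a grid with two search nodes:
# local:
#     ----------
#     searchnode:
#         ----------
#         node1:
#             ----------
#             ip:
#                 10.0.0.11
#         node2:
#             ----------
#             ip:
#                 10.0.0.12
```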
pillar/kafka/nodes.sls (new file, 2 lines)
@@ -0,0 +1,2 @@
kafka:
  nodes:
@@ -1,16 +1,15 @@
 {% set node_types = {} %}
-{% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %}
 {% for minionid, ip in salt.saltutil.runner(
     'mine.get',
-    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ',
+    tgt='logstash:enabled:true',
     fun='network.ip_addrs',
-    tgt_type='compound') | dictsort()
+    tgt_type='pillar') | dictsort()
 %}

 # only add a node to the pillar if it returned an ip from the mine
 {% if ip | length > 0%}
-{% set hostname = cached_grains[minionid]['host'] %}
-{% set node_type = minionid.split('_')[1] %}
+{% set hostname = minionid.split('_') | first %}
+{% set node_type = minionid.split('_') | last %}
 {% if node_type not in node_types.keys() %}
 {% do node_types.update({node_type: {hostname: ip[0]}}) %}
 {% else %}
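The switch from compound role matching to pillar targeting can be exercised ad hoc on the manager; a sketch of the equivalent runner call (output shape depends on the grid):
```
salt-run mine.get 'logstash:enabled:true' network.ip_addrs tgt_type=pillar
```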
pillar/redis/nodes.sls (new file, 34 lines)
@@ -0,0 +1,34 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
    'mine.get',
    tgt='redis:enabled:true',
    fun='network.ip_addrs',
    tgt_type='pillar') | dictsort()
%}

# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}


redis:
  nodes:
{% for node_type, values in node_types.items() %}
    {{node_type}}:
{% for hostname, ip in values.items() %}
      {{hostname}}:
        ip: {{ip}}
{% endfor %}
{% endfor %}
@@ -47,10 +47,12 @@ base:
     - kibana.adv_kibana
     - kratos.soc_kratos
     - kratos.adv_kratos
+    - redis.nodes
     - redis.soc_redis
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
+    - elasticsearch.nodes
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - elasticfleet.soc_elasticfleet
@@ -61,6 +63,9 @@ base:
     - backup.adv_backup
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
+    - kafka.nodes
+    - kafka.soc_kafka
+    - kafka.adv_kafka
     - stig.soc_stig

 '*_sensor':
@@ -144,10 +149,12 @@ base:
     - idstools.adv_idstools
     - kratos.soc_kratos
     - kratos.adv_kratos
+    - redis.nodes
     - redis.soc_redis
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
+    - elasticsearch.nodes
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
     - elasticfleet.soc_elasticfleet
@@ -176,6 +183,9 @@ base:
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
+    - kafka.nodes
+    - kafka.soc_kafka
+    - kafka.adv_kafka

 '*_heavynode':
     - elasticsearch.auth
@@ -209,17 +219,22 @@ base:
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash
+    - elasticsearch.nodes
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
+    - redis.nodes
     - redis.soc_redis
     - redis.adv_redis
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
     - soc.license
+    - kafka.nodes
+    - kafka.soc_kafka
+    - kafka.adv_kafka

 '*_receiver':
     - logstash.nodes
@@ -232,6 +247,10 @@
     - redis.adv_redis
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
+    - kafka.nodes
+    - kafka.soc_kafka
+    - kafka.adv_kafka
     - soc.license

 '*_import':
     - secrets
pyci.sh (12 lines changed)
@@ -15,12 +15,16 @@ TARGET_DIR=${1:-.}

 PATH=$PATH:/usr/local/bin

-if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
-  echo "Missing dependencies. Consider running the following command:"
-  echo "  python -m pip install flake8 pytest pytest-cov"
+if [ ! -d .venv ]; then
+  python -m venv .venv
+fi
+
+source .venv/bin/activate
+
+if ! pip install flake8 pytest pytest-cov pyyaml; then
+  echo "Unable to install dependencies."
   exit 1
 fi

-pip install pytest pytest-cov
 flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
 python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
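Usage is unchanged by this commit; a hypothetical invocation, with TARGET_DIR defaulting to the current directory:
```
./pyci.sh path/to/python/code
```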
@@ -103,7 +103,8 @@
         'utility',
         'schedule',
         'docker_clean',
-        'stig'
+        'stig',
+        'kafka'
     ],
     'so-managersearch': [
         'salt.master',
@@ -125,7 +126,8 @@
         'utility',
         'schedule',
         'docker_clean',
-        'stig'
+        'stig',
+        'kafka'
     ],
     'so-searchnode': [
         'ssl',
@@ -134,7 +136,9 @@
         'firewall',
         'schedule',
         'docker_clean',
-        'stig'
+        'stig',
+        'kafka.ca',
+        'kafka.ssl'
     ],
     'so-standalone': [
         'salt.master',
@@ -159,7 +163,8 @@
         'schedule',
         'tcpreplay',
         'docker_clean',
-        'stig'
+        'stig',
+        'kafka'
     ],
     'so-sensor': [
         'ssl',
@@ -190,7 +195,9 @@
         'telegraf',
         'firewall',
         'schedule',
-        'docker_clean'
+        'docker_clean',
+        'kafka',
+        'stig'
     ],
     'so-desktop': [
         'ssl',
@@ -1,6 +1,3 @@
-mine_functions:
-  x509.get_pem_entries: [/etc/pki/ca.crt]
-
 x509_signing_policies:
   filebeat:
     - minions: '*'
@@ -70,3 +67,17 @@ x509_signing_policies:
     - authorityKeyIdentifier: keyid,issuer:always
     - days_valid: 820
     - copypath: /etc/pki/issued_certs/
+  kafka:
+    - minions: '*'
+    - signing_private_key: /etc/pki/ca.key
+    - signing_cert: /etc/pki/ca.crt
+    - C: US
+    - ST: Utah
+    - L: Salt Lake City
+    - basicConstraints: "critical CA:false"
+    - keyUsage: "digitalSignature, keyEncipherment"
+    - subjectKeyIdentifier: hash
+    - authorityKeyIdentifier: keyid,issuer:always
+    - extendedKeyUsage: "serverAuth, clientAuth"
+    - days_valid: 820
+    - copypath: /etc/pki/issued_certs/
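A minimal sketch of how a minion might request a certificate under the new kafka policy; the certificate path, key path, CN, and the manager's minion ID are placeholders, not taken from this diff:
```
salt-call x509.create_certificate \
    path=/etc/pki/kafka.crt \
    ca_server=<manager_minion_id> \
    signing_policy=kafka \
    public_key=/etc/pki/kafka.key \
    CN="$(hostname -f)"
```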
@@ -14,6 +14,11 @@ net.core.wmem_default:
   sysctl.present:
     - value: 26214400

+# Users are not a fan of console messages
+kernel.printk:
+  sysctl.present:
+    - value: "3 4 1 3"
+
 # Remove variables.txt from /tmp - This is temp
 rmvariablesfile:
   file.absent:
@@ -57,6 +57,12 @@ copy_so-yaml_manager_tools_sbin:
     - force: True
     - preserve: True

+copy_so-repo-sync_manager_tools_sbin:
+  file.copy:
+    - name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-repo-sync
+    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
+    - preserve: True
+
 # This section is used to put the new script in place so that it can be called during soup.
 # It is faster than calling the states that normally manage them to put them in place.
 copy_so-common_sbin:
@@ -94,6 +100,13 @@ copy_so-yaml_sbin:
     - force: True
     - preserve: True

+copy_so-repo-sync_sbin:
+  file.copy:
+    - name: /usr/sbin/so-repo-sync
+    - source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
+    - force: True
+    - preserve: True
+
 {% else %}
 fix_23_soup_sbin:
   cmd.run:
@@ -8,7 +8,7 @@
 # Elastic agent is not managed by salt. Because of this we must store this base information in a
 # script that accompanies the soup system. Since so-common is one of those special soup files,
 # and since this same logic is required during installation, it's included in this file.
-ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
+ELASTIC_AGENT_TARBALL_VERSION="8.14.3"
 ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
 ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
 ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
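The MD5 URL above exists so the downloaded tarball can be checked before use; a minimal sketch reusing the variables defined in this hunk, with error handling kept deliberately simple:
```
EXPECTED_MD5=$(curl -fsSL "$ELASTIC_AGENT_MD5_URL")
ACTUAL_MD5=$(md5sum "$ELASTIC_AGENT_FILE" | awk '{print $1}')
if [ "$EXPECTED_MD5" != "$ACTUAL_MD5" ]; then
    echo "Elastic Agent tarball failed MD5 verification" >&2
    exit 1
fi
```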
@@ -31,6 +31,11 @@ if ! echo "$PATH" | grep -q "/usr/sbin"; then
   export PATH="$PATH:/usr/sbin"
 fi

+# See if a proxy is set. If so use it.
+if [ -f /etc/profile.d/so-proxy.sh ]; then
+  . /etc/profile.d/so-proxy.sh
+fi
+
 # Define a banner to separate sections
 banner="========================================================================="
@@ -50,6 +50,7 @@ container_list() {
     "so-idh"
     "so-idstools"
     "so-influxdb"
+    "so-kafka"
     "so-kibana"
     "so-kratos"
     "so-logstash"
@@ -95,6 +95,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic)
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic)
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic)
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in start_workers" # server not yet ready (logstash waiting on elastic)
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|block in buffer_initialize" # server not yet ready (logstash waiting on elastic)
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready
@@ -147,6 +149,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncing rule" # false positive (rule sync log line includes rule name which can contain 'error')
 fi

 if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -170,6 +173,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cannot join on an empty table" # InfluxDB flux query, import nodes
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exhausting result iterator" # InfluxDB flux query mismatched table results (temporary data issue)
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to finish run" # InfluxDB rare error, self-recoverable
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to gather disk name" # InfluxDB known error, can't read disks because the container doesn't have them mounted
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"
@@ -205,6 +209,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|detect-parse" # Suricata encountering a malformed rule
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integrity check failed" # Detections: Exclude false positive due to automated testing
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|syncErrors" # Detections: Not an actual error
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
 fi

 RESULT=0
@@ -241,6 +246,7 @@ exclude_log "mysqld.log" # MySQL is removed as of 2.4.70, logs may still be on
 exclude_log "soctopus.log" # Soctopus is removed as of 2.4.70, logs may still be on disk
 exclude_log "agentstatus.log" # ignore this log since it tracks agents in error state
 exclude_log "detections_runtime-status_yara.log" # temporarily ignore this log until Detections is more stable
+exclude_log "/nsm/kafka/data/" # ignore Kafka data directory from log check.

 for log_file in $(cat /tmp/log_check_files); do
   status "Checking log file $log_file"
@@ -9,6 +9,9 @@

 . /usr/sbin/so-common

+software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
+hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")
+
 {%- if salt['grains.get']('sosmodel', '') %}
 {%- set model = salt['grains.get']('sosmodel') %}
 model={{ model }}
@@ -16,25 +19,35 @@ model={{ model }}
 if [[ $model =~ ^(SO2AMI01|SO2AZI01|SO2GCI01)$ ]]; then
   exit 0
 fi

+for i in "${software_raid[@]}"; do
+  if [[ "$model" == $i ]]; then
+    is_softwareraid=true
+    is_hwraid=false
+    break
+  fi
+done
+
+for i in "${hardware_raid[@]}"; do
+  if [[ "$model" == $i ]]; then
+    is_softwareraid=false
+    is_hwraid=true
+    break
+  fi
+done
+
 {%- else %}
 echo "This is not an appliance"
 exit 0
 {%- endif %}
-if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200|SOSSNNV|SOSMN)$ ]]; then
-  is_bossraid=true
-fi
-if [[ $model =~ ^(SOSSNNV|SOSMN)$ ]]; then
-  is_swraid=true
-fi
-if [[ $model =~ ^(SOS10K|SOS500|SOS1000|SOS1000F|SOS4000|SOSSN7200)$ ]]; then
-  is_hwraid=true
-fi

 check_nsm_raid() {
   PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
   MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)

   if [[ $APPLIANCE == '1' ]]; then
     if [[ "$model" == "SOS500" || "$model" == "SOS500-DE02" ]]; then
       #This doesn't have raid
       HWRAID=0
     else
       if [[ -n $PERCCLI ]]; then
         HWRAID=0
       elif [[ -n $MEGACTL ]]; then
@@ -42,7 +55,6 @@ check_nsm_raid() {
       else
         HWRAID=1
       fi
-
     fi

 }
@@ -50,17 +62,27 @@ check_nsm_raid() {
 check_boss_raid() {
   MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)
   MVTEST=$(/usr/local/bin/mvcli info -o vd | grep "No adapter")
+  BOSSNVMECLI=$(/usr/local/bin/mnv_cli info -o vd -i 0 | grep Functional)

-  # Check to see if this is a SM based system
-  if [[ -z $MVTEST ]]; then
-    if [[ -n $MVCLI ]]; then
-      BOSSRAID=0
-    else
-      BOSSRAID=1
-    fi
-  else
-    # This doesn't have boss raid so lets make it 0
-    BOSSRAID=0
+  # Is this NVMe Boss Raid?
+  if [[ "$model" =~ "-DE02" ]]; then
+    if [[ -n $BOSSNVMECLI ]]; then
+      BOSSRAID=0
+    else
+      BOSSRAID=1
+    fi
+  else
+    # Check to see if this is a SM based system
+    if [[ -z $MVTEST ]]; then
+      if [[ -n $MVCLI ]]; then
+        BOSSRAID=0
+      else
+        BOSSRAID=1
+      fi
+    else
+      # This doesn't have boss raid so lets make it 0
+      BOSSRAID=0
+    fi
   fi
 }
@@ -79,14 +101,13 @@ SWRAID=0
 BOSSRAID=0
 HWRAID=0

-if [[ $is_hwraid ]]; then
+if [[ "$is_hwraid" == "true" ]]; then
   check_nsm_raid
+  check_boss_raid
 fi
-if [[ $is_bossraid ]]; then
-  check_boss_raid
-fi
-if [[ $is_swraid ]]; then
+if [[ "$is_softwareraid" == "true" ]]; then
   check_software_raid
+  check_boss_raid
 fi

 sum=$(($SWRAID + $BOSSRAID + $HWRAID))
@@ -10,7 +10,7 @@
 . /usr/sbin/so-common
 . /usr/sbin/so-image-common

-REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)}
+REPLAYIFACE=${REPLAYIFACE:-"{{salt['pillar.get']('sensor:interface', '')}}"}
 REPLAYSPEED=${REPLAYSPEED:-10}

 mkdir -p /opt/so/samples
@@ -187,3 +187,12 @@ docker:
       custom_bind_mounts: []
       extra_hosts: []
       extra_env: []
+    'so-kafka':
+      final_octet: 88
+      port_bindings:
+        - 0.0.0.0:9092:9092
+        - 0.0.0.0:9093:9093
+        - 0.0.0.0:8778:8778
+      custom_bind_mounts: []
+      extra_hosts: []
+      extra_env: []
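After a highstate applies these bindings, a quick hedged check that the newly published Kafka ports are listening on the node:
```
ss -lntp | grep -E ':(9092|9093|8778) '
```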
@@ -20,30 +20,30 @@ dockergroup:
 dockerheldpackages:
   pkg.installed:
     - pkgs:
-      - containerd.io: 1.6.21-1
-      - docker-ce: 5:24.0.3-1~debian.12~bookworm
-      - docker-ce-cli: 5:24.0.3-1~debian.12~bookworm
-      - docker-ce-rootless-extras: 5:24.0.3-1~debian.12~bookworm
+      - containerd.io: 1.6.33-1
+      - docker-ce: 5:26.1.4-1~debian.12~bookworm
+      - docker-ce-cli: 5:26.1.4-1~debian.12~bookworm
+      - docker-ce-rootless-extras: 5:26.1.4-1~debian.12~bookworm
     - hold: True
     - update_holds: True
 {% elif grains.oscodename == 'jammy' %}
 dockerheldpackages:
   pkg.installed:
     - pkgs:
-      - containerd.io: 1.6.21-1
-      - docker-ce: 5:24.0.2-1~ubuntu.22.04~jammy
-      - docker-ce-cli: 5:24.0.2-1~ubuntu.22.04~jammy
-      - docker-ce-rootless-extras: 5:24.0.2-1~ubuntu.22.04~jammy
+      - containerd.io: 1.6.33-1
+      - docker-ce: 5:26.1.4-1~ubuntu.22.04~jammy
+      - docker-ce-cli: 5:26.1.4-1~ubuntu.22.04~jammy
+      - docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.22.04~jammy
     - hold: True
     - update_holds: True
 {% else %}
 dockerheldpackages:
   pkg.installed:
     - pkgs:
-      - containerd.io: 1.4.9-1
-      - docker-ce: 5:20.10.8~3-0~ubuntu-focal
-      - docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal
-      - docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal
+      - containerd.io: 1.6.33-1
+      - docker-ce: 5:26.1.4-1~ubuntu.20.04~focal
+      - docker-ce-cli: 5:26.1.4-1~ubuntu.20.04~focal
+      - docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.20.04~focal
     - hold: True
     - update_holds: True
 {% endif %}
@@ -51,10 +51,10 @@ dockerheldpackages:
 dockerheldpackages:
   pkg.installed:
     - pkgs:
-      - containerd.io: 1.6.21-3.1.el9
-      - docker-ce: 24.0.4-1.el9
-      - docker-ce-cli: 24.0.4-1.el9
-      - docker-ce-rootless-extras: 24.0.4-1.el9
+      - containerd.io: 1.6.33-3.1.el9
+      - docker-ce: 3:26.1.4-1.el9
+      - docker-ce-cli: 1:26.1.4-1.el9
+      - docker-ce-rootless-extras: 26.1.4-1.el9
     - hold: True
     - update_holds: True
 {% endif %}
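A hedged spot-check that the pins took effect; the Debian/Ubuntu command is standard apt, while the EL9 line assumes the dnf versionlock plugin backs Salt's hold mechanism:
```
# Debian/Ubuntu
apt-mark showhold | grep -E 'docker|containerd'
# Oracle Linux 9 (assumption: holds are implemented via versionlock)
dnf versionlock list | grep -E 'docker|containerd'
```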
@@ -101,3 +101,4 @@
       multiline: True
       forcedType: "[]string"
   so-zeek: *dockerOptions
+  so-kafka: *dockerOptions
@@ -3,8 +3,8 @@ elastalert:
     description: You can enable or disable Elastalert.
     helpLink: elastalert.html
   alerter_parameters:
-    title: Alerter Parameters
-    description: Optional configuration parameters for additional alerters that can be enabled for all Sigma rules. Filter for 'Alerter' in this Configuration screen to find the setting that allows these alerters to be enabled within the SOC ElastAlert module. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available alerters and their required configuration parameters. A full update of the ElastAlert rule engine, via the Detections screen, is required in order to apply these changes. Requires a valid Security Onion license key.
+    title: Custom Configuration Parameters
+    description: Optional configuration parameters made available as defaults for all rules and alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available configuration parameters. Requires a valid Security Onion license key.
     global: True
     multiline: True
     syntax: yaml
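For illustration only, a hypothetical set of parameters in the YAML shape this setting expects, echoed via a heredoc; the Slack options come from the ElastAlert 2 documentation and the webhook URL is a placeholder:
```
cat <<'EOF'
alert:
  - slack
slack_webhook_url: "https://hooks.slack.com/services/XXX/YYY/ZZZ"
slack_username_override: "Security Onion ElastAlert"
EOF
```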
@@ -97,6 +97,7 @@ elasticfleet:
         - symantec_endpoint
         - system
+        - tcp
         - tenable_io
         - tenable_sc
         - ti_abusech
         - ti_anomali
@@ -27,7 +27,9 @@ wait_for_elasticsearch_elasticfleet:
 so-elastic-fleet-auto-configure-logstash-outputs:
   cmd.run:
     - name: /usr/sbin/so-elastic-fleet-outputs-update
-    - retry: True
+    - retry:
+        attempts: 4
+        interval: 30
 {% endif %}

 # If enabled, automatically update Fleet Server URLs & ES Connection
@@ -35,7 +37,9 @@ so-elastic-fleet-auto-configure-logstash-outputs:
 so-elastic-fleet-auto-configure-server-urls:
   cmd.run:
     - name: /usr/sbin/so-elastic-fleet-urls-update
-    - retry: True
+    - retry:
+        attempts: 4
+        interval: 30
 {% endif %}

 # Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
@@ -43,12 +47,16 @@ so-elastic-fleet-auto-configure-server-urls:
 so-elastic-fleet-auto-configure-elasticsearch-urls:
   cmd.run:
     - name: /usr/sbin/so-elastic-fleet-es-url-update
-    - retry: True
+    - retry:
+        attempts: 4
+        interval: 30

 so-elastic-fleet-auto-configure-artifact-urls:
   cmd.run:
     - name: /usr/sbin/so-elastic-fleet-artifacts-url-update
-    - retry: True
+    - retry:
+        attempts: 4
+        interval: 30

 {% endif %}
(new file, 19 lines)
@@ -0,0 +1,19 @@
{
    "package": {
        "name": "fleet_server",
        "version": ""
    },
    "name": "fleet_server-1",
    "namespace": "default",
    "policy_id": "FleetServer_hostname",
    "vars": {},
    "inputs": {
        "fleet_server-fleet-server": {
            "enabled": true,
            "vars": {
                "custom": "server.ssl.supported_protocols: [\"TLSv1.2\", \"TLSv1.3\"]\nserver.ssl.cipher_suites: [ \"ECDHE-RSA-AES-128-GCM-SHA256\", \"ECDHE-RSA-AES-256-GCM-SHA384\", \"ECDHE-RSA-AES-128-CBC-SHA\", \"ECDHE-RSA-AES-256-CBC-SHA\", \"RSA-AES-128-GCM-SHA256\", \"RSA-AES-256-GCM-SHA384\"]"
            },
            "streams": {}
        }
    }
}
@@ -5,7 +5,7 @@
     "package": {
         "name": "endpoint",
         "title": "Elastic Defend",
-        "version": "8.10.2"
+        "version": "8.14.0"
     },
     "enabled": true,
     "policy_id": "endpoints-initial",
@@ -11,7 +11,7 @@
     "winlogs-winlog": {
         "enabled": true,
         "streams": {
-            "winlog.winlog": {
+            "winlog.winlogs": {
                 "enabled": true,
                 "vars": {
                     "channel": "Microsoft-Windows-Windows Defender/Operational",
@@ -20,7 +20,7 @@
     ],
     "data_stream.dataset": "import",
     "custom": "",
-    "processors": "- dissect:\n    tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n    field: \"log.file.path\"\n    target_prefix: \"\"\n- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n- drop_fields:\n    fields: [\"host\"]\n    ignore_missing: true\n- add_fields:\n    target: data_stream\n    fields:\n      type: logs\n      dataset: system.security\n- add_fields:\n    target: event\n    fields:\n      dataset: system.security\n      module: system\n      imported: true\n- add_fields:\n    target: \"@metadata\"\n    fields:\n      pipeline: logs-system.security-1.43.0\n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: windows.sysmon_operational\n    - add_fields:\n        target: event\n        fields:\n          dataset: windows.sysmon_operational\n          module: windows\n          imported: true\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-windows.sysmon_operational-1.38.0\n- if:\n    equals:\n      winlog.channel: 'Application'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: system.application\n    - add_fields:\n        target: event\n        fields:\n          dataset: system.application\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-system.application-1.43.0\n- if:\n    equals:\n      winlog.channel: 'System'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: system.system\n    - add_fields:\n        target: event\n        fields:\n          dataset: system.system\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-system.system-1.43.0\n    \n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: windows.powershell_operational\n    - add_fields:\n        target: event\n        fields:\n          dataset: windows.powershell_operational\n          module: windows\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-windows.powershell_operational-1.38.0\n- add_fields:\n    target: data_stream\n    fields:\n      dataset: import",
+    "processors": "- dissect:\n    tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n    field: \"log.file.path\"\n    target_prefix: \"\"\n- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n- drop_fields:\n    fields: [\"host\"]\n    ignore_missing: true\n- add_fields:\n    target: data_stream\n    fields:\n      type: logs\n      dataset: system.security\n- add_fields:\n    target: event\n    fields:\n      dataset: system.security\n      module: system\n      imported: true\n- add_fields:\n    target: \"@metadata\"\n    fields:\n      pipeline: logs-system.security-1.59.0\n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: windows.sysmon_operational\n    - add_fields:\n        target: event\n        fields:\n          dataset: windows.sysmon_operational\n          module: windows\n          imported: true\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-windows.sysmon_operational-1.45.1\n- if:\n    equals:\n      winlog.channel: 'Application'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: system.application\n    - add_fields:\n        target: event\n        fields:\n          dataset: system.application\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-system.application-1.59.0\n- if:\n    equals:\n      winlog.channel: 'System'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: system.system\n    - add_fields:\n        target: event\n        fields:\n          dataset: system.system\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-system.system-1.59.0\n    \n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: windows.powershell_operational\n    - add_fields:\n        target: event\n        fields:\n          dataset: windows.powershell_operational\n          module: windows\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-windows.powershell_operational-1.45.1\n- add_fields:\n    target: data_stream\n    fields:\n      dataset: import",
     "tags": [
         "import"
     ]
(new file, 29 lines)
@@ -0,0 +1,29 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-elastic-fleet-common

# Get all the fleet policies
json_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true')

# Extract the IDs that start with "FleetServer_"
POLICY=$(echo "$json_output" | jq -r '.items[] | select(.id | startswith("FleetServer_")) | .id')

# Iterate over each ID in the POLICY variable
for POLICYNAME in $POLICY; do
    printf "\nUpdating Policy: $POLICYNAME\n"

    # First get the Integration ID
    INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$POLICYNAME" | jq -r '.item.package_policies[] | select(.package.name == "fleet_server") | .id')

    # Modify the default integration policy to update the policy_id and an with the correct naming
    UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "$POLICYNAME" --arg name "fleet_server-$POLICYNAME" '
    .policy_id = $policy_id |
    .name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)

    # Now update the integration policy using the modified JSON
    elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
done
@@ -12,7 +12,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
   # First, check for any package upgrades
   /usr/sbin/so-elastic-fleet-package-upgrade

-  # Second, configure Elastic Defend Integration seperately
+  # Second, update Fleet Server policies
+  /sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
+
+  # Third, configure Elastic Defend Integration seperately
   /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend

   # Initial Endpoints
@@ -19,7 +19,7 @@ NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers")

 for i in {1..30}
 do
-  ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
+  ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys?perPage=100" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
   FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
   if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
 done
@@ -21,64 +21,104 @@ function update_logstash_outputs() {
   # Update Logstash Outputs
   curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
 }
+function update_kafka_outputs() {
+  # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
+  SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')
+
+  JSON_STRING=$(jq -n \
+    --arg UPDATEDLIST "$NEW_LIST_JSON" \
+    --argjson SSL_CONFIG "$SSL_CONFIG" \
+    '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
+  # Update Kafka outputs
+  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
+}

-# Get current list of Logstash Outputs
-RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
-
-# Check to make sure that the server responded with good data - else, bail from script
-CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
-if [ "$CHECKSUM" != "so-manager_logstash" ]; then
-    printf "Failed to query for current Logstash Outputs..."
-    exit 1
-fi
-
-# Get the current list of Logstash outputs & hash them
-CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
-CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
-
-declare -a NEW_LIST=()
+{% if GLOBALS.pipeline == "KAFKA" %}
+# Get current list of Kafka Outputs
+RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
+
+# Check to make sure that the server responded with good data - else, bail from script
+CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
+if [ "$CHECKSUM" != "so-manager_kafka" ]; then
+    printf "Failed to query for current Kafka Outputs..."
+    exit 1
+fi
+
+# Get the current list of kafka outputs & hash them
+CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
+CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
+
+declare -a NEW_LIST=()
+
+# Query for the current Grid Nodes that are running kafka
+KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local')
+
+# Query for Kafka nodes with Broker role and add hostname to list
+while IFS= read -r line; do
+  NEW_LIST+=("$line")
+done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES)
+
+{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
+{% else %}
+# Get current list of Logstash Outputs
+RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
+
+# Check to make sure that the server responded with good data - else, bail from script
+CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
+if [ "$CHECKSUM" != "so-manager_logstash" ]; then
+    printf "Failed to query for current Logstash Outputs..."
+    exit 1
+fi
+
+# Get the current list of Logstash outputs & hash them
+CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
+CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
+
+declare -a NEW_LIST=()

 {# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
 {% if ELASTICFLEETMERGED.enable_manager_output %}
 # Create array & add initial elements
 if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
     NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
 else
     NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
 fi
 {% endif %}

 # Query for FQDN entries & add them to the list
 {% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
 CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
 readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
 for CUSTOMNAME in "${CUSTOMFQDN[@]}"
 do
     NEW_LIST+=("$CUSTOMNAME:5055")
 done
 {% endif %}

 # Query for the current Grid Nodes that are running Logstash
 LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')

 # Query for Receiver Nodes & add them to the list
 if grep -q "receiver" <<< $LOGSTASHNODES; then
     readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
     for NODE in "${RECEIVERNODES[@]}"
     do
         NEW_LIST+=("$NODE:5055")
     done
 fi

 # Query for Fleet Nodes & add them to the list
 if grep -q "fleet" <<< $LOGSTASHNODES; then
     readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
     for NODE in "${FLEETNODES[@]}"
     do
         NEW_LIST+=("$NODE:5055")
     done
 fi

 # Sort & hash the new list of Logstash Outputs
 NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
 NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
@@ -87,9 +127,28 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
 if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
     printf "\nHashes match - no update needed.\n"
     printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
+
+    # Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed
+    printf "Checking if the correct output policy is set as default\n"
+    OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON)
+    OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON)
+    if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then
+        printf "Default output policy needs to be updated.\n"
+        {%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
+        update_kafka_outputs
+        {%- else %}
+        update_logstash_outputs
+        {%- endif %}
+    else
+        printf "Default output policy is set - no update needed.\n"
+    fi
     exit 0
 else
     printf "\nHashes don't match - update needed.\n"
     printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
+    {%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
+    update_kafka_outputs
+    {%- else %}
     update_logstash_outputs
+    {%- endif %}
 fi
@@ -53,7 +53,8 @@ fi
printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)

-### Create Outputs & Fleet URLs ###
+### Create Outputs, Fleet Policy and Fleet URLs ###
# Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in $INTCA)
JSON_STRING=$( jq -n \
@@ -62,7 +63,21 @@ JSON_STRING=$( jq -n \
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
printf "\n\n"

-printf "\nCreate Logstash Output Config if node is not an Import or Eval install\n"
+# Create the Manager Fleet Server Host Agent Policy
+# This has to be done while the Elasticsearch Output is set to the default Output
+printf "Create Manager Fleet Server Policy...\n"
+elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
+
+# Modify the default integration policy to update the policy_id with the correct naming
+UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
+.policy_id = $policy_id |
+.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
+
+# Add the Fleet Server Integration to the new Fleet Policy
+elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
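The jq --arg rewrite above can be tried in isolation; the input document here is a trimmed, hypothetical stand-in for fleet-server.json:

#!/bin/bash
cat > /tmp/fleet-server.json <<'EOF'
{"policy_id": "FleetServer_default", "name": "fleet_server-default", "package": {"name": "fleet_server"}}
EOF
# --arg binds shell values as jq string variables; the filter overwrites both fields.
jq --arg policy_id "FleetServer_manager01" --arg name "fleet_server-manager01" \
  '.policy_id = $policy_id | .name = $name' /tmp/fleet-server.json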
+# Now we can create the Logstash Output and set it to be the default Output
+printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
@@ -77,6 +92,11 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
printf "\n\n"
{%- endif %}

+printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n"
+{% if grains.role not in ['so-import', 'so-eval'] %}
+/usr/sbin/so-kafka-fleet-output-policy
+{% endif %}
+
# Add Manager Hostname & URL Base to Fleet Host URLs
printf "\nAdd SO-Manager Fleet URL\n"
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
@@ -96,16 +116,6 @@ printf "\n\n"
# Load Elasticsearch templates
/usr/sbin/so-elasticsearch-templates-load

-# Manager Fleet Server Host
-elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "true" "120"
-
-# Temp Fixup for ES Output bug
-JSON_STRING=$( jq -n \
---arg NAME "FleetServer_{{ GLOBALS.hostname }}" \
-'{"name": $NAME,"description": $NAME,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":120,"data_output_id":"so-manager_elasticsearch"}'
-)
-curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/FleetServer_{{ GLOBALS.hostname }}" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
-
# Initial Endpoints Policy
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"

@@ -0,0 +1,52 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}

. /usr/sbin/so-common

# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?

if [[ "$RETURN_CODE" != "0" ]]; then
  printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
  exit 1
fi

output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)

if ! echo "$output" | grep -q "so-manager_kafka"; then
  KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
  KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
  KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
  KAFKA_OUTPUT_VERSION="2.6.0"
  JSON_STRING=$( jq -n \
    --arg KAFKACRT "$KAFKACRT" \
    --arg KAFKAKEY "$KAFKAKEY" \
    --arg KAFKACA "$KAFKACA" \
    --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
    --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
    '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-securityonion","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
  )
  curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
  refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)

  if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
    echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
    exit 1
  elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
    echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
  fi

elif echo "$output" | grep -q "so-manager_kafka"; then
  echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi
{% else %}
echo -e "\nNo update required...\n"
{% endif %}
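The script's idempotency check can be exercised by hand; the id list below is illustrative (so-manager_kafka only appears once the output has been created):

#!/bin/bash
# List all configured Fleet output ids, exactly as the script does.
curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r '.items[].id'
# so-manager_elasticsearch
# so-manager_kafka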
@@ -4,7 +4,7 @@
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls.split('.')[0] in allowed_states %}
+{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

# Move our new CA over so Elastic and Logstash can use SSL with the internal CA

@@ -1,23 +1,37 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS with context %}

{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}

-{# ES_LOGSTASH_NODES is the same as LOGSTASH_NODES from logstash/map.jinja but heavynodes and fleet nodes are removed #}
-{% set ES_LOGSTASH_NODES = [] %}
-{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
+{# this is a list of dicts containing hostname:ip for elasticsearch nodes that need to know about each other for cluster #}
+{% set ELASTICSEARCH_SEED_HOSTS = [] %}
+{% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() | sort %}
-{% if node_type not in ['heavynode', 'fleet'] %}
+{% if node_type != 'heavynode' %}
{% for hostname in node_data[node_type].keys() %}
-{% do ES_LOGSTASH_NODES.append({hostname:node_details[hostname].ip}) %}
+{% do ELASTICSEARCH_SEED_HOSTS.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endif %}
{% endfor %}

{# this is a list of dicts containing hostname:ip of all nodes running elasticsearch #}
{% set ELASTICSEARCH_NODES = [] %}
{% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() %}
{% for hostname in node_data[node_type].keys() %}
{% do ELASTICSEARCH_NODES.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endfor %}

{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
-{% if ES_LOGSTASH_NODES | length > 1 %}
+{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
-{% for NODE in ES_LOGSTASH_NODES %}
+{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %}
{% endfor %}
{% endif %}
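With a hypothetical grid of a manager plus one search node, the seed-host loop above renders into the Elasticsearch config roughly as:

discovery:
  seed_hosts:
    - so-manager
    - so-searchnode01

Heavy nodes are excluded from ELASTICSEARCH_SEED_HOSTS, so a single-node grid falls through to discovery.type=single-node instead (see the container environment further below).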
@@ -118,6 +118,11 @@ esingestconf:
    - user: 930
    - group: 939

+# Remove .fleet_final_pipeline-1 because we are using global@custom now
+so-fleet-final-pipeline-remove:
+  file.absent:
+    - name: /opt/so/conf/elasticsearch/ingest/.fleet_final_pipeline-1
+
# Auto-generate Elasticsearch ingest node pipelines from pillar
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
es_ingest_conf_{{pipeline}}:

File diff suppressed because it is too large
@@ -7,8 +7,8 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
-{% from 'logstash/map.jinja' import LOGSTASH_NODES %}
-{% from 'elasticsearch/config.map.jinja' import ES_LOGSTASH_NODES %}
+{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
+{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
@@ -27,7 +27,7 @@ so-elasticsearch:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
    - extra_hosts:
-{% for node in LOGSTASH_NODES %}
+{% for node in ELASTICSEARCH_NODES %}
{% for hostname, ip in node.items() %}
      - {{hostname}}:{{ip}}
{% endfor %}
@@ -38,7 +38,7 @@ so-elasticsearch:
{% endfor %}
{% endif %}
    - environment:
-{% if ES_LOGSTASH_NODES | length == 1 or GLOBALS.role == 'so-heavynode' %}
+{% if ELASTICSEARCH_SEED_HOSTS | length == 1 or GLOBALS.role == 'so-heavynode' %}
      - discovery.type=single-node
{% endif %}
      - ES_JAVA_OPTS=-Xms{{ GLOBALS.elasticsearch.es_heap }} -Xmx{{ GLOBALS.elasticsearch.es_heap }} -Des.transport.cname_in_publish_address=true -Dlog4j2.formatMsgNoLookups=true

@@ -62,6 +62,7 @@
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "dataset_tag_temp" } },
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
+{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
{%- endraw %}
{%- if HIGHLANDER %}
@@ -73,6 +74,8 @@
}
{%- endif %}
{%- raw %}
+,
+{ "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } }
]
}
{% endraw %}

@@ -1,106 +0,0 @@
{
  "version": 3,
  "_meta": {
    "managed_by": "fleet",
    "managed": true
  },
  "description": "Final pipeline for processing all incoming Fleet Agent documents. \n",
  "processors": [
    {
      "date": {
        "description": "Add time when event was ingested (and remove sub-seconds to improve storage efficiency)",
        "tag": "truncate-subseconds-event-ingested",
        "field": "_ingest.timestamp",
        "target_field": "event.ingested",
        "formats": [
          "ISO8601"
        ],
        "output_format": "date_time_no_millis",
        "ignore_failure": true
      }
    },
    {
      "remove": {
        "description": "Remove any pre-existing untrusted values.",
        "field": [
          "event.agent_id_status",
          "_security"
        ],
        "ignore_missing": true
      }
    },
    {
      "set_security_user": {
        "field": "_security",
        "properties": [
          "authentication_type",
          "username",
          "realm",
          "api_key"
        ]
      }
    },
    {
      "script": {
        "description": "Add event.agent_id_status based on the API key metadata and the agent.id contained in the event.\n",
        "tag": "agent-id-status",
        "source": "boolean is_user_trusted(def ctx, def users) {\n  if (ctx?._security?.username == null) {\n    return false;\n  }\n\n  def user = null;\n  for (def item : users) {\n    if (item?.username == ctx._security.username) {\n      user = item;\n      break;\n    }\n  }\n\n  if (user == null || user?.realm == null || ctx?._security?.realm?.name == null) {\n    return false;\n  }\n\n  if (ctx._security.realm.name != user.realm) {\n    return false;\n  }\n\n  return true;\n}\n\nString verified(def ctx, def params) {\n  // No agent.id field to validate.\n  if (ctx?.agent?.id == null) {\n    return \"missing\";\n  }\n\n  // Check auth metadata from API key.\n  if (ctx?._security?.authentication_type == null\n      // Agents only use API keys.\n      || ctx._security.authentication_type != 'API_KEY'\n      // Verify the API key owner before trusting any metadata it contains.\n      || !is_user_trusted(ctx, params.trusted_users)\n      // Verify the API key has metadata indicating the assigned agent ID.\n      || ctx?._security?.api_key?.metadata?.agent_id == null) {\n    return \"auth_metadata_missing\";\n  }\n\n  // The API key can only be used represent the agent.id it was issued to.\n  if (ctx._security.api_key.metadata.agent_id != ctx.agent.id) {\n    // Potential masquerade attempt.\n    return \"mismatch\";\n  }\n\n  return \"verified\";\n}\n\nif (ctx?.event == null) {\n  ctx.event = [:];\n}\n\nctx.event.agent_id_status = verified(ctx, params);",
        "params": {
          "trusted_users": [
            {
              "username": "elastic/fleet-server",
              "realm": "_service_account"
            },
            {
              "username": "cloud-internal-agent-server",
              "realm": "found"
            },
            {
              "username": "elastic",
              "realm": "reserved"
            }
          ]
        }
      }
    },
    {
      "remove": {
        "field": "_security",
        "ignore_missing": true
      }
    },
    { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
    { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
    { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
    { "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
    { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}" } },
    { "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
    { "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
    { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
    { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
    { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
    { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
    { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
    { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
    { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
  ],
  "on_failure": [
    {
      "remove": {
        "field": "_security",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "append": {
        "field": "error.message",
        "value": [
          "failed in Fleet agent final_pipeline: {{ _ingest.on_failure_message }}"
        ]
      }
    }
  ]
}
salt/elasticsearch/files/ingest/global@custom (Normal file, 27 lines)
@@ -0,0 +1,27 @@
{
  "version": 3,
  "_meta": {
    "managed_by": "securityonion",
    "managed": true
  },
  "description": "Custom pipeline for processing all incoming Fleet Agent documents. \n",
  "processors": [
    { "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
    { "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
    { "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
    { "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
    { "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp}}" } },
    { "set": { "if": "ctx.network?.direction == 'egress'", "override": true, "field": "network.initiated", "value": "true" } },
    { "set": { "if": "ctx.network?.direction == 'ingress'", "override": true, "field": "network.initiated", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv4'", "override": true, "field": "destination.ipv6", "value": "false" } },
    { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
    { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } },
    { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } },
    { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
    { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
    { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
    { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
    { "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
    { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
  ]
}
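The effect of global@custom on a document can be previewed with the Elasticsearch ingest simulate API (the event values here are hypothetical):

#!/bin/bash
# Simulate the pipeline against a sample Fleet document.
curl -K /opt/so/conf/elasticsearch/curl.config -s -X POST "localhost:9200/_ingest/pipeline/global@custom/_simulate" \
  -H 'Content-Type: application/json' -d'
{ "docs": [ { "_source": { "event": { "dataset": "system.auth" } } } ] }'
# Expected outcome: event.module is set to "system" and "auth" is appended to tags.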
@@ -1,6 +1,7 @@
{
  "description" : "suricata.alert",
  "processors" : [
+   { "set": { "field": "_index", "value": "logs-suricata.alerts-so" } },
    { "set": { "field": "tags","value": "alert" }},
    { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
    { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },

@@ -466,6 +466,13 @@ elasticsearch:
so-logs-sonicwall_firewall_x_log: *indexSettings
so-logs-snort_x_log: *indexSettings
so-logs-symantec_endpoint_x_log: *indexSettings
+so-logs-tenable_io_x_asset: *indexSettings
+so-logs-tenable_io_x_plugin: *indexSettings
+so-logs-tenable_io_x_scan: *indexSettings
+so-logs-tenable_io_x_vulnerability: *indexSettings
+so-logs-tenable_sc_x_asset: *indexSettings
+so-logs-tenable_sc_x_plugin: *indexSettings
+so-logs-tenable_sc_x_vulnerability: *indexSettings
so-logs-ti_abusech_x_malware: *indexSettings
so-logs-ti_abusech_x_malwarebazaar: *indexSettings
so-logs-ti_abusech_x_threatfox: *indexSettings
@@ -521,6 +528,7 @@ elasticsearch:
so-endgame: *indexSettings
so-idh: *indexSettings
so-suricata: *indexSettings
+so-suricata_x_alerts: *indexSettings
so-import: *indexSettings
so-kratos: *indexSettings
so-kismet: *indexSettings
@@ -529,6 +537,58 @@ elasticsearch:
so-strelka: *indexSettings
so-syslog: *indexSettings
so-zeek: *indexSettings
so-metrics-fleet_server_x_agent_status: &fleetMetricsSettings
  index_sorting:
    description: Sorts the index by event time, at the cost of additional processing resource consumption.
    advanced: True
    readonly: True
    helpLink: elasticsearch.html
  index_template:
    ignore_missing_component_templates:
      description: Ignore component templates if they aren't in Elasticsearch.
      advanced: True
      readonly: True
      helpLink: elasticsearch.html
    index_patterns:
      description: Patterns for matching multiple indices or tables.
      advanced: True
      readonly: True
      helpLink: elasticsearch.html
    template:
      settings:
        index:
          mode:
            description: Type of mode used for this index. Time series indices can be used for metrics to reduce necessary storage.
            advanced: True
            readonly: True
            helpLink: elasticsearch.html
          number_of_replicas:
            description: Number of replicas required for this index. Multiple replicas protect against data loss, but also increase storage costs.
            advanced: True
            readonly: True
            helpLink: elasticsearch.html
    composed_of:
      description: The index template is composed of these component templates.
      advanced: True
      readonly: True
      helpLink: elasticsearch.html
    priority:
      description: The priority of the index template.
      advanced: True
      readonly: True
      helpLink: elasticsearch.html
    data_stream:
      hidden:
        description: Hide the data stream.
        advanced: True
        readonly: True
        helpLink: elasticsearch.html
      allow_custom_routing:
        description: Allow custom routing for the data stream.
        advanced: True
        readonly: True
        helpLink: elasticsearch.html
so-metrics-fleet_server_x_agent_versions: *fleetMetricsSettings
so_roles:
  so-manager: &soroleSettings
    config:

@@ -1,3 +1,8 @@
+{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+https://securityonion.net/license; you may not use this file except in compliance with the
+Elastic License 2.0. #}
+
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
{% set DEFAULT_GLOBAL_OVERRIDES = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings.pop('global_overrides') %}

@@ -17,10 +22,26 @@
{% set ES_INDEX_SETTINGS = {} %}
{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
-{# if policy isn't defined in the original index settings, then don't merge policy from the global_overrides #}
-{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM managed indices #}
-{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
-{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
+
+{# prevent this action from being performed on custom defined indices. #}
+{# the custom defined index is not present in either of the dictionaries and fails to render. #}
+{% if index in ES_INDEX_SETTINGS_ORIG and index in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES %}
+
+{# don't merge policy from the global_overrides if policy isn't defined in the original index settings #}
+{# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non-ILM managed indices #}
+{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
+{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
+{% endif %}
+
+{# this prevents an index from inheriting a policy phase from global overrides if it wasn't defined in the defaults. #}
+{% if ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
+{% for phase in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.copy() %}
+{% if ES_INDEX_SETTINGS_ORIG[index].policy.phases[phase] is not defined %}
+{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %}
+{% endif %}
+{% endfor %}
+{% endif %}
+
+{% endif %}

{% if settings.index_template is defined %}

@@ -6,7 +6,7 @@
      "name": "logs"
    },
    "codec": "best_compression",
-   "default_pipeline": "logs-elastic_agent-1.13.1",
+   "default_pipeline": "logs-elastic_agent-1.20.0",
    "mapping": {
      "total_fields": {
        "limit": "10000"

@@ -0,0 +1,201 @@
{
  "template": {
    "settings": {
      "index": {
        "lifecycle": {
          "name": "metrics"
        },
        "default_pipeline": "metrics-fleet_server.agent_status-1.5.0",
        "mapping": {
          "total_fields": {
            "limit": "1000"
          }
        }
      }
    },
    "mappings": {
      "dynamic": false,
      "_source": {
        "mode": "synthetic"
      },
      "properties": {
        "cluster": {
          "properties": {
            "id": { "time_series_dimension": true, "type": "keyword" }
          }
        },
        "fleet": {
          "properties": {
            "agents": {
              "properties": {
                "offline": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "total": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "updating": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "inactive": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "healthy": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "unhealthy": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "unenrolled": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "enrolled": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "unhealthy_reason": {
                  "properties": {
                    "output": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "input": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "other": { "time_series_metric": "gauge", "meta": {}, "type": "long" }
                  }
                },
                "upgrading_step": {
                  "properties": {
                    "rollback": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "requested": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "restarting": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "downloading": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "scheduled": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "extracting": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "replacing": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "failed": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                    "watching": { "time_series_metric": "gauge", "meta": {}, "type": "long" }
                  }
                }
              }
            }
          }
        },
        "agent": {
          "properties": {
            "id": { "ignore_above": 1024, "type": "keyword" },
            "type": { "ignore_above": 1024, "type": "keyword" },
            "version": { "ignore_above": 1024, "type": "keyword" }
          }
        },
        "@timestamp": {
          "ignore_malformed": false,
          "type": "date"
        },
        "data_stream": {
          "properties": {
            "namespace": { "type": "constant_keyword" },
            "type": { "type": "constant_keyword" },
            "dataset": { "type": "constant_keyword" }
          }
        },
        "kibana": {
          "properties": {
            "uuid": { "path": "agent.id", "type": "alias" },
            "version": { "path": "agent.version", "type": "alias" }
          }
        }
      }
    }
  },
  "_meta": {
    "package": {
      "name": "fleet_server"
    },
    "managed_by": "fleet",
    "managed": true
  }
}
@@ -0,0 +1,102 @@
{
  "template": {
    "settings": {
      "index": {
        "lifecycle": {
          "name": "metrics"
        },
        "default_pipeline": "metrics-fleet_server.agent_versions-1.5.0",
        "mapping": {
          "total_fields": {
            "limit": "1000"
          }
        }
      }
    },
    "mappings": {
      "dynamic": false,
      "_source": {
        "mode": "synthetic"
      },
      "properties": {
        "cluster": {
          "properties": {
            "id": { "time_series_dimension": true, "type": "keyword" }
          }
        },
        "fleet": {
          "properties": {
            "agent": {
              "properties": {
                "count": { "time_series_metric": "gauge", "meta": {}, "type": "long" },
                "version": { "time_series_dimension": true, "type": "keyword" }
              }
            }
          }
        },
        "agent": {
          "properties": {
            "id": { "ignore_above": 1024, "type": "keyword" },
            "type": { "ignore_above": 1024, "type": "keyword" },
            "version": { "ignore_above": 1024, "type": "keyword" }
          }
        },
        "@timestamp": {
          "ignore_malformed": false,
          "type": "date"
        },
        "data_stream": {
          "properties": {
            "namespace": { "type": "constant_keyword" },
            "type": { "type": "constant_keyword" },
            "dataset": { "type": "constant_keyword" }
          }
        },
        "kibana": {
          "properties": {
            "uuid": { "path": "agent.id", "type": "alias" },
            "version": { "path": "agent.version", "type": "alias" }
          }
        }
      }
    }
  },
  "_meta": {
    "package": {
      "name": "fleet_server"
    },
    "managed_by": "fleet",
    "managed": true
  }
}
@@ -0,0 +1,112 @@
{
  "template": {
    "mappings": {
      "dynamic": "strict",
      "properties": {
        "binary": { "type": "binary" },
        "boolean": { "type": "boolean" },
        "byte": { "type": "byte" },
        "created_at": { "type": "date" },
        "created_by": { "type": "keyword" },
        "date": { "type": "date" },
        "date_nanos": { "type": "date_nanos" },
        "date_range": { "type": "date_range" },
        "deserializer": { "type": "keyword" },
        "double": { "type": "double" },
        "double_range": { "type": "double_range" },
        "float": { "type": "float" },
        "float_range": { "type": "float_range" },
        "geo_point": { "type": "geo_point" },
        "geo_shape": { "type": "geo_shape" },
        "half_float": { "type": "half_float" },
        "integer": { "type": "integer" },
        "integer_range": { "type": "integer_range" },
        "ip": { "type": "ip" },
        "ip_range": { "type": "ip_range" },
        "keyword": { "type": "keyword" },
        "list_id": { "type": "keyword" },
        "long": { "type": "long" },
        "long_range": { "type": "long_range" },
        "meta": { "type": "object", "enabled": false },
        "serializer": { "type": "keyword" },
        "shape": { "type": "shape" },
        "short": { "type": "short" },
        "text": { "type": "text" },
        "tie_breaker_id": { "type": "keyword" },
        "updated_at": { "type": "date" },
        "updated_by": { "type": "keyword" }
      }
    },
    "aliases": {}
  },
  "version": 2,
  "_meta": {
    "managed": true,
    "description": "default mappings for the .items index template installed by Kibana/Security"
  }
}
@@ -0,0 +1,55 @@
{
  "template": {
    "mappings": {
      "dynamic": "strict",
      "properties": {
        "created_at": { "type": "date" },
        "created_by": { "type": "keyword" },
        "description": { "type": "keyword" },
        "deserializer": { "type": "keyword" },
        "immutable": { "type": "boolean" },
        "meta": { "type": "object", "enabled": false },
        "name": { "type": "keyword" },
        "serializer": { "type": "keyword" },
        "tie_breaker_id": { "type": "keyword" },
        "type": { "type": "keyword" },
        "updated_at": { "type": "date" },
        "updated_by": { "type": "keyword" },
        "version": { "type": "keyword" }
      }
    },
    "aliases": {}
  },
  "version": 2,
  "_meta": {
    "managed": true,
    "description": "default mappings for the .lists index template installed by Kibana/Security"
  }
}
@@ -0,0 +1,29 @@
{
  "template": {
    "mappings": {
      "properties": {
        "host": {
          "properties": {
            "ip": { "type": "ip" }
          }
        },
        "related": {
          "properties": {
            "ip": { "type": "ip" }
          }
        },
        "source": {
          "properties": {
            "ip": { "type": "ip" }
          }
        }
      }
    }
  }
}
@@ -20,7 +20,7 @@ if [ ! -f /opt/so/state/espipelines.txt ]; then

cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..."
-for i in .[a-z]* *;
+for i in *;
do
  echo $i;
  retry 5 5 "so-elasticsearch-query _ingest/pipeline/$i -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load pipeline: $i"
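The glob change above stops hidden pipeline files from being loaded, which matches the removal of .fleet_final_pipeline-1 earlier in this changeset. Shell glob semantics, shown against a hypothetical pipeline directory:

$ ls -A
.fleet_final_pipeline-1  common  suricata.alert
$ echo .[a-z]* *    # old glob: also matches dotfiles such as the removed Fleet final pipeline
.fleet_final_pipeline-1 common suricata.alert
$ echo *            # new glob: plain files only
common suricata.alert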
@@ -4,7 +4,7 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{%- from 'vars/globals.map.jinja' import GLOBALS %}
-{%- set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
+{%- set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}

. /usr/sbin/so-common

@@ -40,9 +40,9 @@ fi

# Iterate through the output of _cat/allocation for each node in the cluster to determine the total available space
{% if GLOBALS.role == 'so-manager' %}
-for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $5}'); do
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $8}'); do
{% else %}
-for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $8}'); do
{% endif %}
size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}')
unit=$(echo $i | grep -oE '[A-Za-z]+')
@@ -13,10 +13,10 @@ TOTAL_USED_SPACE=0
# Iterate through the output of _cat/allocation for each node in the cluster to determine the total used space
{% if GLOBALS.role == 'so-manager' %}
# Get total disk space - disk.total
-for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $3}'); do
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | grep -v "{{ GLOBALS.manager }}$" | awk '{print $6}'); do
{% else %}
# Get disk space taken up by indices - disk.indices
-for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $2}'); do
+for i in $(/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $5}'); do
{% endif %}
size=$(echo $i | grep -oE '[0-9].*' | awk '{print int($1+0.5)}')
unit=$(echo $i | grep -oE '[A-Za-z]+')
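The awk column shifts in these two scripts (each old column moving three places right) are consistent with newer Elasticsearch releases prepending forecast/undesired-shard columns to _cat/allocation; treat the exact header below as an assumption:

#!/bin/bash
# Hypothetical _cat/allocation header on a newer Elasticsearch:
#   shards shards.undesired write_load.forecast disk.indices.forecast disk.indices disk.used disk.avail disk.total disk.percent host ip node
# disk.indices is now column 5 (was 2), disk.used column 6 (was 3), disk.total column 8 (was 5).
/usr/sbin/so-elasticsearch-query _cat/allocation | awk '{print $8}'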
@@ -10,10 +10,26 @@

{%- for index, settings in ES_INDEX_SETTINGS.items() %}
{%- if settings.policy is defined %}
-echo
-echo "Setting up {{ index }}-logs policy..."
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
-echo
+{%- if index == 'so-logs-detections.alerts' %}
+echo
+echo "Setting up so-logs-detections.alerts-so policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-so" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
+echo
+{%- elif index == 'so-logs-soc' %}
+echo
+echo "Setting up so-soc-logs policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/so-soc-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
+echo
+echo
+echo "Setting up {{ index }}-logs policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
+echo
+{%- else %}
+echo
+echo "Setting up {{ index }}-logs policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
+echo
+{%- endif %}
{%- endif %}
{%- endfor %}
echo

@@ -5,7 +5,6 @@
# Elastic License 2.0.
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
-{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}

STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt
STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
@@ -68,9 +67,9 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
echo -n "Waiting for ElasticSearch..."
retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
{% if GLOBALS.role != 'so-heavynode' %}
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
-INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} )
-if [ "$INSTALLED" != "installed" ]; then
+TEMPLATE="logs-endpoint.alerts@package"
+INSTALLED=$(so-elasticsearch-query _component_template/$TEMPLATE | jq -r .component_templates[0].name)
+if [ "$INSTALLED" != "$TEMPLATE" ]; then
echo
echo "Packages not yet installed."
echo
@@ -134,7 +133,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
TEMPLATE=${i::-14}
COMPONENT_PATTERN=${TEMPLATE:3}
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
-if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then
+if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ logs-http_endpoint\.generic|logs-winlog\.winlog ]]; then
  load_failures=$((load_failures+1))
  echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
else
@@ -153,7 +152,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
cd - >/dev/null

if [[ $load_failures -eq 0 ]]; then
-  echo "All template loaded successfully"
+  echo "All templates loaded successfully"
  touch $STATE_FILE_SUCCESS
else
  echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate"

@@ -27,6 +27,7 @@
  'so-elastic-fleet',
  'so-elastic-fleet-package-registry',
  'so-influxdb',
+ 'so-kafka',
  'so-kibana',
  'so-kratos',
  'so-logstash',
@@ -80,6 +81,7 @@
{% set NODE_CONTAINERS = [
  'so-logstash',
  'so-redis',
+ 'so-kafka'
] %}

{% elif GLOBALS.role == 'so-idh' %}

@@ -90,6 +90,14 @@ firewall:
    tcp:
      - 8086
    udp: []
+  kafka_controller:
+    tcp:
+      - 9093
+    udp: []
+  kafka_data:
+    tcp:
+      - 9092
+    udp: []
  kibana:
    tcp:
      - 5601
@@ -753,7 +761,6 @@ firewall:
  - beats_5044
  - beats_5644
  - beats_5056
  - redis
  - elasticsearch_node
  - elastic_agent_control
  - elastic_agent_data
@@ -1267,35 +1274,51 @@ firewall:
    chain:
      DOCKER-USER:
        hostgroups:
          desktop:
            portgroups:
              - elastic_agent_data
          fleet:
            portgroups:
              - beats_5056
              - elastic_agent_data
          idh:
            portgroups:
              - elastic_agent_data
          sensor:
            portgroups:
              - beats_5044
              - beats_5644
              - elastic_agent_data
          searchnode:
            portgroups:
              - redis
              - beats_5644
              - elastic_agent_data
          standalone:
            portgroups:
              - redis
              - elastic_agent_data
          manager:
            portgroups:
              - elastic_agent_data
          managersearch:
            portgroups:
              - redis
              - beats_5644
              - elastic_agent_data
          self:
            portgroups:
              - redis
              - beats_5644
              - elastic_agent_data
          beats_endpoint:
            portgroups:
              - beats_5044
          beats_endpoint_ssl:
            portgroups:
              - beats_5644
          elastic_agent_endpoint:
            portgroups:
              - elastic_agent_data
          endgame:
            portgroups:
              - endgame
          receiver:
            portgroups: []
          customhostgroup0:
            portgroups: []
          customhostgroup1:

@@ -18,4 +18,28 @@
{% endfor %}
{% endif %}

+{# Only add Kafka firewall items when Kafka enabled #}
+{% set role = GLOBALS.role.split('-')[1] %}
+
+{% if GLOBALS.pipeline == 'KAFKA' and role in ['manager', 'managersearch', 'standalone'] %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
+{% endif %}
+
+{% if GLOBALS.pipeline == 'KAFKA' and role == 'receiver' %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.self.portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.standalone.portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.manager.portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.managersearch.portgroups.append('kafka_controller') %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
+{% endif %}
+
+{% if GLOBALS.pipeline == 'KAFKA' and role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
+{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
+{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
+{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
+{% endif %}
+{% endfor %}
+{% endif %}
+
{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
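Rendered for a manager with the Kafka pipeline enabled, the appends above yield roughly the following effective firewall pillar (a sketch; the hostgroup contents are abbreviated):

manager:
  chain:
    DOCKER-USER:
      hostgroups:
        manager:
          portgroups:
            - elastic_agent_data
            - kafka_controller
            - kafka_data
        receiver:
          portgroups:
            - kafka_controller
            - kafka_data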
@@ -120,6 +120,12 @@ firewall:
  influxdb:
    tcp: *tcpsettings
    udp: *udpsettings
+ kafka_controller:
+   tcp: *tcpsettings
+   udp: *udpsettings
+ kafka_data:
+   tcp: *tcpsettings
+   udp: *udpsettings
  kibana:
    tcp: *tcpsettings
    udp: *udpsettings
@@ -939,7 +945,6 @@ firewall:
      portgroups: *portgroupshost
    customhostgroup9:
      portgroups: *portgroupshost

  idh:
    chain:
      DOCKER-USER:

@@ -1,2 +1,3 @@
global:
  pcapengine: STENO
+ pipeline: REDIS
@@ -36,9 +36,10 @@ global:
    global: True
    advanced: True
  pipeline:
-   description: Sets which pipeline technology for events to use. Currently only Redis is supported.
+   description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license.
    regex: ^(REDIS|KAFKA)$
    regexFailureMessage: You must enter either REDIS or KAFKA.
    global: True
    readonly: True
    advanced: True
  repo_host:
    description: Specify the host where operating system packages will be served from.
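To opt a grid into the experimental pipeline, the global pillar value is overridden; a hypothetical pillar override would look like:

global:
  pipeline: KAFKA

The value must match ^(REDIS|KAFKA)$ or SOC rejects it with the regexFailureMessage above.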
@@ -33,6 +33,19 @@ idstools_sbin_jinja:
    - file_mode: 755
    - template: jinja

+suricatacustomdirsfile:
+  file.directory:
+    - name: /nsm/rules/detect-suricata/custom_file
+    - user: 939
+    - group: 939
+    - makedirs: True
+
+suricatacustomdirsurl:
+  file.directory:
+    - name: /nsm/rules/detect-suricata/custom_temp
+    - user: 939
+    - group: 939
+
{% else %}

{{sls}}_state_not_allowed:

@@ -1,6 +1,8 @@
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%}
+{%- from 'soc/merged.map.jinja' import SOCMERGED -%}
+--suricata-version=7.0.3
--merged=/opt/so/rules/nids/suri/all.rules
--output=/nsm/rules/detect-suricata/custom_temp
--local=/opt/so/rules/nids/suri/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/suri/extraction.rules
@@ -10,8 +12,12 @@
--disable=/opt/so/idstools/etc/disable.conf
--enable=/opt/so/idstools/etc/enable.conf
--modify=/opt/so/idstools/etc/modify.conf
-{%- if IDSTOOLSMERGED.config.urls | length > 0 %}
-{%- for URL in IDSTOOLSMERGED.config.urls %}
---url={{ URL }}
-{%- endfor %}
+{%- if SOCMERGED.config.server.modules.suricataengine.customRulesets %}
+{%- for ruleset in SOCMERGED.config.server.modules.suricataengine.customRulesets %}
+{%- if 'url' in ruleset %}
+--url={{ ruleset.url }}
+{%- elif 'file' in ruleset %}
+--local={{ ruleset.file }}
+{%- endif %}
+{%- endfor %}
{%- endif %}

@@ -11,8 +11,8 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
{%- set proxy = salt['pillar.get']('manager:proxy') %}
{%- set noproxy = salt['pillar.get']('manager:no_proxy', '') %}

-# Download the rules from the internet
{%- if proxy %}
+# Download the rules from the internet
export http_proxy={{ proxy }}
export https_proxy={{ proxy }}
export no_proxy="{{ noproxy }}"
@@ -20,14 +20,12 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then

mkdir -p /nsm/rules/suricata
chown -R socore:socore /nsm/rules/suricata
-{%- if not GLOBALS.airgap %}
# Download the rules from the internet
+{%- if GLOBALS.airgap != 'True' %}
{%- if IDSTOOLSMERGED.config.ruleset == 'ETOPEN' %}
-docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
+docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
-docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
-{%- elif IDSTOOLSMERGED.config.ruleset == 'TALOS' %}
-docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ IDSTOOLSMERGED.config.oinkcode }}
+docker exec so-idstools idstools-rulecat -v --suricata-version 7.0.3 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
{%- endif %}
{%- endif %}

File diff suppressed because one or more lines are too long
salt/kafka/ca.sls (Normal file, 37 lines)
@@ -0,0 +1,37 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set KAFKATRUST = salt['pillar.get']('kafka:truststore') %}

kafkaconfdir:
  file.directory:
    - name: /opt/so/conf/kafka
    - user: 960
    - group: 960
    - makedirs: True

{% if GLOBALS.is_manager %}
# Manager runs so-kafka-trust to create the truststore for Kafka SSL communication
kafka_truststore:
  cmd.script:
    - source: salt://kafka/tools/sbin_jinja/so-kafka-trust
    - template: jinja
    - cwd: /opt/so
    - defaults:
        GLOBALS: {{ GLOBALS }}
        KAFKATRUST: {{ KAFKATRUST }}
{% endif %}

kafkacertz:
  file.managed:
    - name: /opt/so/conf/kafka/kafka-truststore.jks
    - source: salt://kafka/files/kafka-truststore
    - user: 960
    - group: 931

{% endif %}
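so-kafka-trust itself is not shown in this diff; a plausible core of such a script, built from the pillar values referenced above (treat the exact keytool invocation as an assumption, not the shipped implementation):

#!/bin/bash
# Import the grid's internal CA into a JKS truststore for Kafka TLS.
keytool -importcert -noprompt \
  -alias so-intca \
  -file /etc/pki/tls/certs/intca.crt \
  -keystore /opt/so/conf/kafka/kafka-truststore.jks \
  -storepass "$KAFKA_TRUSTPASS"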
salt/kafka/config.map.jinja (Normal file, 85 lines)
@@ -0,0 +1,85 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0. #}
{% from 'kafka/map.jinja' import KAFKAMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

{% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %}
{% set KAFKA_PASSWORD = salt['pillar.get']('kafka:config:password') %}
{% set KAFKA_TRUSTPASS = salt['pillar.get']('kafka:config:trustpass') %}

{# Create list of KRaft controllers #}
{% set controllers = [] %}

{# Check for Kafka nodes with controller in process_x_roles #}
{% for node in KAFKA_NODES_PILLAR %}
{% if 'controller' in KAFKA_NODES_PILLAR[node].role %}
{% do controllers.append(KAFKA_NODES_PILLAR[node].nodeid ~ "@" ~ node ~ ":9093") %}
{% endif %}
{% endfor %}

{% set kafka_controller_quorum_voters = ','.join(controllers) %}

{# By default all Kafka-eligible nodes are given the role of broker, except for the
   grid MANAGER (broker,controller), until overridden through the SOC UI #}
{% set node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}

{# Generate server.properties for 'broker', 'controller', and 'broker,controller' node types;
   anything above this line is configuration needed for ALL Kafka nodes #}
{% if node_type == 'broker' %}
{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %}
{% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
{% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
{% do KAFKAMERGED.config.broker.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}

{# Nodes with only the 'broker' role need the below settings for communicating with controller nodes #}
{% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %}
{% do KAFKAMERGED.config.broker.update({
    'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map
    + ',' + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map })
%}
{% endif %}

{% if node_type == 'controller' %}
{% do KAFKAMERGED.config.controller.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
{% do KAFKAMERGED.config.controller.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
{% do KAFKAMERGED.config.controller.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}

{% endif %}

{# Kafka nodes of this type are not recommended for use outside of development / testing. #}
{% if node_type == 'broker,controller' %}
{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %}
{% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %}
{% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
{% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
{% do KAFKAMERGED.config.broker.update({'process_x_roles': 'broker,controller' }) %}
{% do KAFKAMERGED.config.broker.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}

{% do KAFKAMERGED.config.broker.update({
    'listeners': KAFKAMERGED.config.broker.listeners + ',' + KAFKAMERGED.config.controller.listeners })
%}

{% do KAFKAMERGED.config.broker.update({
    'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map
    + ',' + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map })
%}

{% endif %}

{# Truststore config #}
{% do KAFKAMERGED.config.broker.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %}
{% do KAFKAMERGED.config.controller.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %}
{% do KAFKAMERGED.config.client.update({'ssl_x_truststore_x_password': KAFKA_TRUSTPASS }) %}

{# Client properties #}
{% do KAFKAMERGED.config.client.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}

{% if 'broker' in node_type %}
{% set KAFKACONFIG = KAFKAMERGED.config.broker %}
{% else %}
{% set KAFKACONFIG = KAFKAMERGED.config.controller %}
{% endif %}

{% set KAFKACLIENT = KAFKAMERGED.config.client %}
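For reference, the loop above renders controller.quorum.voters as a comma-separated list of nodeid@hostname:9093 entries, one entry per node whose role contains 'controller'. A minimal sketch of the rendered value, using hypothetical hostnames and node ids:

    # Two controller-eligible nodes: manager (nodeid 1) and receiver1 (nodeid 2)
    controller.quorum.voters: 1@manager:9093,2@receiver1:9093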
84
salt/kafka/config.sls
Normal file
@@ -0,0 +1,84 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

kafka_group:
  group.present:
    - name: kafka
    - gid: 960

kafka_user:
  user.present:
    - name: kafka
    - uid: 960
    - gid: 960
    - home: /opt/so/conf/kafka
    - createhome: False

kafka_home_dir:
  file.absent:
    - name: /home/kafka

kafka_sbin_tools:
  file.recurse:
    - name: /usr/sbin
    - source: salt://kafka/tools/sbin
    - user: 960
    - group: 960
    - file_mode: 755

kafka_sbin_jinja_tools:
  file.recurse:
    - name: /usr/sbin
    - source: salt://kafka/tools/sbin_jinja
    - user: 960
    - group: 960
    - file_mode: 755
    - template: jinja
    - defaults:
        GLOBALS: {{ GLOBALS }}

kafka_log_dir:
  file.directory:
    - name: /opt/so/log/kafka
    - user: 960
    - group: 960
    - makedirs: True

kafka_data_dir:
  file.directory:
    - name: /nsm/kafka/data
    - user: 960
    - group: 960
    - makedirs: True

{% for sc in ['server', 'client'] %}
kafka_kraft_{{sc}}_properties:
  file.managed:
    - source: salt://kafka/etc/{{sc}}.properties.jinja
    - name: /opt/so/conf/kafka/{{sc}}.properties
    - template: jinja
    - user: 960
    - group: 960
    - makedirs: True
    - show_changes: False
{% endfor %}

reset_quorum_on_changes:
  cmd.run:
    - name: rm -f /nsm/kafka/data/__cluster_metadata-0/quorum-state
    - onchanges:
      - file: /opt/so/conf/kafka/server.properties

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
64
salt/kafka/defaults.yaml
Normal file
@@ -0,0 +1,64 @@
kafka:
  enabled: False
  cluster_id:
  controllers:
  reset:
  logstash: []
  config:
    password:
    trustpass:
    broker:
      advertised_x_listeners:
      auto_x_create_x_topics_x_enable: true
      controller_x_quorum_x_voters:
      default_x_replication_x_factor: 1
      inter_x_broker_x_listener_x_name: BROKER
      listeners: BROKER://0.0.0.0:9092
      listener_x_security_x_protocol_x_map: BROKER:SSL
      log_x_dirs: /nsm/kafka/data
      log_x_retention_x_check_x_interval_x_ms: 300000
      log_x_retention_x_hours: 168
      log_x_segment_x_bytes: 1073741824
      node_x_id:
      num_x_io_x_threads: 8
      num_x_network_x_threads: 3
      num_x_partitions: 3
      num_x_recovery_x_threads_x_per_x_data_x_dir: 1
      offsets_x_topic_x_replication_x_factor: 1
      process_x_roles: broker
      socket_x_receive_x_buffer_x_bytes: 102400
      socket_x_request_x_max_x_bytes: 104857600
      socket_x_send_x_buffer_x_bytes: 102400
      ssl_x_keystore_x_location: /etc/pki/kafka.p12
      ssl_x_keystore_x_type: PKCS12
      ssl_x_keystore_x_password:
      ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks
      ssl_x_truststore_x_type: JKS
      ssl_x_truststore_x_password:
      transaction_x_state_x_log_x_min_x_isr: 1
      transaction_x_state_x_log_x_replication_x_factor: 1
    client:
      security_x_protocol: SSL
      ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks
      ssl_x_truststore_x_type: JKS
      ssl_x_truststore_x_password:
      ssl_x_keystore_x_location: /etc/pki/kafka.p12
      ssl_x_keystore_x_type: PKCS12
      ssl_x_keystore_x_password:
    controller:
      controller_x_listener_x_names: CONTROLLER
      controller_x_quorum_x_voters:
      listeners: CONTROLLER://0.0.0.0:9093
      listener_x_security_x_protocol_x_map: CONTROLLER:SSL
      log_x_dirs: /nsm/kafka/data
      log_x_retention_x_check_x_interval_x_ms: 300000
      log_x_retention_x_hours: 168
      log_x_segment_x_bytes: 1073741824
      node_x_id:
      process_x_roles: controller
      ssl_x_keystore_x_location: /etc/pki/kafka.p12
      ssl_x_keystore_x_type: PKCS12
      ssl_x_keystore_x_password:
      ssl_x_truststore_x_location: /etc/pki/kafka-truststore.jks
      ssl_x_truststore_x_type: JKS
      ssl_x_truststore_x_password:
34
salt/kafka/disabled.sls
Normal file
@@ -0,0 +1,34 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'kafka/map.jinja' import KAFKAMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

so-kafka:
  docker_container.absent:
    - force: True

so-kafka_so-status.disabled:
  file.comment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-kafka$
    - onlyif: grep -q '^so-kafka$' /opt/so/conf/so-status/so-status.conf

{% if GLOBALS.is_manager and KAFKAMERGED.enabled or GLOBALS.pipeline == "KAFKA" %}
ensure_default_pipeline:
  cmd.run:
    - name: |
        /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False;
        /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/global/soc_global.sls global.pipeline REDIS
{% endif %}

{# If Kafka has never been manually enabled, the 'kafka' user does not exist. In that case, certs for Kafka should not exist either, since they would be owned by uid 960 #}
{% for cert in ['kafka-client.crt','kafka-client.key','kafka.crt','kafka.key','kafka-logstash.crt','kafka-logstash.key','kafka-logstash.p12','kafka.p12','elasticfleet-kafka.p8'] %}
check_kafka_cert_{{cert}}:
  file.absent:
    - name: /etc/pki/{{cert}}
    - onlyif: stat -c %U /etc/pki/{{cert}} | grep -q UNKNOWN
    - show_changes: False
{% endfor %}
90
salt/kafka/enabled.sls
Normal file
@@ -0,0 +1,90 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %}
{% if 'gmd' in salt['pillar.get']('features', []) %}

include:
  - kafka.ca
  - kafka.config
  - kafka.ssl
  - kafka.storage
  - kafka.sostatus

so-kafka:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }}
    - hostname: so-kafka
    - name: so-kafka
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
    - user: kafka
    - environment:
        KAFKA_HEAP_OPTS: -Xmx2G -Xms1G
        KAFKA_OPTS: -javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml
    - extra_hosts:
{% for node in KAFKANODES %}
      - {{ node }}:{{ KAFKANODES[node].ip }}
{% endfor %}
{% if DOCKER.containers['so-kafka'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %}
      - {{ XTRAHOST }}
{% endfor %}
{% endif %}
    - port_bindings:
{% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
      - {{ BINDING }}
{% endfor %}
    - binds:
      - /etc/pki/kafka.p12:/etc/pki/kafka.p12:ro
      - /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro
      - /nsm/kafka/data/:/nsm/kafka/data/:rw
      - /opt/so/log/kafka:/opt/kafka/logs/:rw
      - /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro
      - /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties
    - watch:
{% for sc in ['server', 'client'] %}
      - file: kafka_kraft_{{sc}}_properties
{% endfor %}
      - file: kafkacertz
    - require:
      - file: kafkacertz

delete_so-kafka_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-kafka$

{% else %}

{{sls}}_no_license_detected:
  test.fail_without_changes:
    - name: {{sls}}_no_license_detected
    - comment:
      - "Kafka for Guaranteed Message Delivery is a feature supported only for customers with a valid license.
        Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
        for more information about purchasing a license to enable this feature."
include:
  - kafka.disabled
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
7
salt/kafka/etc/client.properties.jinja
Normal file
@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{% from 'kafka/config.map.jinja' import KAFKACLIENT -%}
{{ KAFKACLIENT | yaml(False) | replace("_x_", ".") }}
7
salt/kafka/etc/server.properties.jinja
Normal file
@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{% from 'kafka/config.map.jinja' import KAFKACONFIG -%}
{{ KAFKACONFIG | yaml(False) | replace("_x_", ".") }}
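Kafka property names contain dots, so they are stored in pillar and defaults with the _x_ placeholder and only translated back by the replace filter above. A minimal sketch of that translation, using broker defaults from defaults.yaml (note yaml(False) emits "key: value" lines; Java's Properties loader accepts ':' as well as '=' as the key/value separator):

    # Pillar key                              Rendered server.properties line
    # listener_x_security_x_protocol_x_map -> listener.security.protocol.map: BROKER:SSL
    # log_x_retention_x_hours              -> log.retention.hours: 168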
10
salt/kafka/files/managed_node_pillar.jinja
Normal file
@@ -0,0 +1,10 @@
kafka:
  nodes:
{% for node, values in COMBINED_KAFKANODES.items() %}
    {{ node }}:
      ip: {{ values['ip'] }}
      nodeid: {{ values['nodeid'] }}
{%- if values['role'] != none %}
      role: {{ values['role'] }}
{%- endif %}
{% endfor %}
29
salt/kafka/init.sls
Normal file
@@ -0,0 +1,29 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{% from 'kafka/map.jinja' import KAFKAMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

include:
{# Run kafka/nodes.sls before Kafka is enabled, so the kafka nodes pillar is set up #}
{% if grains.role in ['so-manager','so-managersearch', 'so-standalone'] %}
  - kafka.nodes
{% endif %}
{% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %}
{% if grains.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-receiver'] %}
  - kafka.enabled
{# Searchnodes only run the kafka.ssl state when Kafka is enabled #}
{% elif grains.role == "so-searchnode" %}
  - kafka.ssl
{% endif %}
{% else %}
  - kafka.disabled
{% endif %}
10
salt/kafka/map.jinja
Normal file
@@ -0,0 +1,10 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{# This is only used to determine if Kafka is enabled / disabled. Configuration is found in kafka/config.map.jinja #}
{# kafka/config.map.jinja depends on the kafka nodes pillar being populated #}

{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %}
{% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %}
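As a sanity check on the merge above: with merge=True, any key set in the local kafka pillar overrides the matching key from defaults.yaml, while unset keys keep their defaults. A minimal sketch, assuming a hypothetical override in the soc_kafka.sls pillar referenced by disabled.sls:

    # /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls (hypothetical override)
    kafka:
      enabled: True
      config:
        broker:
          log_x_retention_x_hours: 24
    # After the merge, KAFKAMERGED.enabled is True, KAFKAMERGED.config.broker.log_x_retention_x_hours
    # is 24, and every other broker key keeps its value from defaults.yaml.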
88
salt/kafka/nodes.map.jinja
Normal file
@@ -0,0 +1,88 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{# USED TO GENERATE PILLAR/KAFKA/NODES.SLS. #}
{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

{% set process_x_roles = KAFKADEFAULTS.kafka.config.broker.process_x_roles %}

{% set current_kafkanodes = salt.saltutil.runner(
    'mine.get',
    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver',
    fun='network.ip_addrs',
    tgt_type='compound') %}

{% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', default=None) %}
{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:controllers', default=None) %}

{% set existing_ids = [] %}

{# Check STORED_KAFKANODES for existing kafka nodes and pull their IDs so they are not reused across the grid #}
{% if STORED_KAFKANODES != none %}
{% for node, values in STORED_KAFKANODES.items() %}
{% if values.get('nodeid') %}
{% do existing_ids.append(values['nodeid']) %}
{% endif %}
{% endfor %}
{% endif %}

{# Create list of possible node ids #}
{% set all_possible_ids = range(1, 2000)|list %}

{# Create list of available node ids by looping through all_possible_ids and ensuring each id is not in existing_ids #}
{% set available_ids = [] %}
{% for id in all_possible_ids %}
{% if id not in existing_ids %}
{% do available_ids.append(id) %}
{% endif %}
{% endfor %}

{# Collect kafka-eligible nodes and check whether they're already in STORED_KAFKANODES, to avoid potentially reassigning a nodeid #}
{% set NEW_KAFKANODES = {} %}
{% for minionid, ip in current_kafkanodes.items() %}
{% set hostname = minionid.split('_')[0] %}
{% if not STORED_KAFKANODES or hostname not in STORED_KAFKANODES %}
{% set new_id = available_ids.pop(0) %}
{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0], 'role': process_x_roles }}) %}
{% endif %}
{% endfor %}

{# Combine STORED_KAFKANODES and NEW_KAFKANODES for writing to pillar/kafka/nodes.sls #}
{% set COMBINED_KAFKANODES = {} %}
{% for node, details in NEW_KAFKANODES.items() %}
{% do COMBINED_KAFKANODES.update({node: details}) %}
{% endfor %}
{% if STORED_KAFKANODES != none %}
{% for node, details in STORED_KAFKANODES.items() %}
{% do COMBINED_KAFKANODES.update({node: details}) %}
{% endfor %}
{% endif %}

{# Update the process_x_roles value for any host in the kafka:controllers pillar configured from the SOC UI #}
{% set ns = namespace(has_controller=false) %}
{% if KAFKA_CONTROLLERS_PILLAR != none %}
{% set KAFKA_CONTROLLERS_PILLAR_LIST = KAFKA_CONTROLLERS_PILLAR.split(',') %}
{% for hostname in KAFKA_CONTROLLERS_PILLAR_LIST %}
{% if hostname in COMBINED_KAFKANODES %}
{% do COMBINED_KAFKANODES[hostname].update({'role': 'controller'}) %}
{% set ns.has_controller = true %}
{% endif %}
{% endfor %}
{% for hostname in COMBINED_KAFKANODES %}
{% if hostname not in KAFKA_CONTROLLERS_PILLAR_LIST %}
{% do COMBINED_KAFKANODES[hostname].update({'role': 'broker'}) %}
{% endif %}
{% endfor %}
{# If the kafka:controllers pillar is NOT empty, check that at least one node holds the controller role;
   otherwise default to GLOBALS.manager having the broker,controller role #}
{% if not ns.has_controller %}
{% do COMBINED_KAFKANODES[GLOBALS.manager].update({'role': 'broker,controller'}) %}
{% endif %}
{# If the kafka:controllers pillar is empty, default to the grid manager as 'broker,controller'
   so there is always at least 1 controller in the cluster #}
{% else %}
{% do COMBINED_KAFKANODES[GLOBALS.manager].update({'role': 'broker,controller'}) %}
{% endif %}
18
salt/kafka/nodes.sls
Normal file
@@ -0,0 +1,18 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %}
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %}

{# Write the Kafka pillar, so all grid members have access to the nodeid and role of the other kafka nodes #}
write_kafka_pillar_yaml:
  file.managed:
    - name: /opt/so/saltstack/local/pillar/kafka/nodes.sls
    - mode: 644
    - user: socore
    - source: salt://kafka/files/managed_node_pillar.jinja
    - template: jinja
    - context:
        COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }}
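For orientation, the state above renders managed_node_pillar.jinja into a pillar file shaped like the following; the hostnames, IPs, and node ids here are hypothetical:

    kafka:
      nodes:
        manager:
          ip: 10.0.0.10
          nodeid: 1
          role: broker,controller
        receiver1:
          ip: 10.0.0.20
          nodeid: 2
          role: broker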
9
salt/kafka/reset.sls
Normal file
@@ -0,0 +1,9 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

wipe_kafka_data:
  file.absent:
    - name: /nsm/kafka/data/
    - force: True
229
salt/kafka/soc_kafka.yaml
Normal file
@@ -0,0 +1,229 @@
kafka:
  enabled:
    description: Set to True to enable Kafka. To avoid grid problems, do not enable Kafka until the related configuration is in place. Requires a valid Security Onion license key.
    helpLink: kafka.html
  cluster_id:
    description: The ID of the Kafka cluster.
    readonly: True
    advanced: True
    sensitive: True
    helpLink: kafka.html
  controllers:
    description: A comma-separated list of hostnames that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. Note that only manager and receiver nodes are eligible to run Kafka. This configuration needs to be set before enabling Kafka. Failure to do so may result in Kafka topics becoming unavailable, requiring manual intervention to restore functionality or reset Kafka, either of which can result in data loss.
    forcedType: string
    helpLink: kafka.html
  reset:
    description: Disable and reset the Kafka cluster. This will remove all Kafka data, including logs that may not yet have been ingested into Elasticsearch, and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations, such as rearranging Kafka brokers / controllers, allowing you to reset the cluster rather than manually fix any issues arising from attempting to reassign a Kafka broker as a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed.
    advanced: True
    helpLink: kafka.html
  logstash:
    description: By default logstash is disabled when Kafka is enabled. This option allows you to specify any hosts on which you would like to re-enable logstash alongside Kafka.
    forcedType: "[]string"
    multiline: True
    advanced: True
    helpLink: kafka.html
  config:
    password:
      description: The password used for the Kafka certificates.
      readonly: True
      sensitive: True
      helpLink: kafka.html
    trustpass:
      description: The password used for the Kafka truststore.
      readonly: True
      sensitive: True
      helpLink: kafka.html
    broker:
      advertised_x_listeners:
        description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
        title: advertised.listeners
        helpLink: kafka.html
      auto_x_create_x_topics_x_enable:
        description: Enable the auto creation of topics.
        title: auto.create.topics.enable
        forcedType: bool
        helpLink: kafka.html
      default_x_replication_x_factor:
        description: The default replication factor for automatically created topics. This value must be less than the number of brokers in the cluster. Hosts specified in controllers should not be counted towards the total broker count.
        title: default.replication.factor
        forcedType: int
        helpLink: kafka.html
      inter_x_broker_x_listener_x_name:
        description: The name of the listener used for inter-broker communication.
        title: inter.broker.listener.name
        helpLink: kafka.html
      listeners:
        description: The set of URIs to listen on and their listener names, in a comma-separated list.
        helpLink: kafka.html
      listener_x_security_x_protocol_x_map:
        description: Comma-separated mapping of listener names to security protocols.
        title: listener.security.protocol.map
        helpLink: kafka.html
      log_x_dirs:
        description: Where Kafka logs are stored within the Docker container.
        title: log.dirs
        helpLink: kafka.html
      log_x_retention_x_check_x_interval_x_ms:
        description: Frequency at which log files are checked to see whether they qualify for deletion.
        title: log.retention.check.interval.ms
        helpLink: kafka.html
      log_x_retention_x_hours:
        description: How long, in hours, a log file is kept.
        title: log.retention.hours
        forcedType: int
        helpLink: kafka.html
      log_x_segment_x_bytes:
        description: The maximum allowable size for a log file.
        title: log.segment.bytes
        forcedType: int
        helpLink: kafka.html
      num_x_io_x_threads:
        description: The number of threads the server uses for processing requests, which may include disk I/O.
        title: num.io.threads
        forcedType: int
        helpLink: kafka.html
      num_x_network_x_threads:
        description: The number of threads used for network communication.
        title: num.network.threads
        forcedType: int
        helpLink: kafka.html
      num_x_partitions:
        description: The number of log partitions assigned per topic.
        title: num.partitions
        forcedType: int
        helpLink: kafka.html
      num_x_recovery_x_threads_x_per_x_data_x_dir:
        description: The number of threads used for log recovery at startup and flushing at shutdown. This number of threads is used per data directory.
        title: num.recovery.threads.per.data.dir
        forcedType: int
        helpLink: kafka.html
      offsets_x_topic_x_replication_x_factor:
        description: The offsets topic replication factor.
        title: offsets.topic.replication.factor
        forcedType: int
        helpLink: kafka.html
      process_x_roles:
        description: The role performed by Kafka brokers.
        title: process.roles
        readonly: True
        helpLink: kafka.html
      socket_x_receive_x_buffer_x_bytes:
        description: Size, in bytes, of the SO_RCVBUF buffer. A value of -1 will use the OS default.
        title: socket.receive.buffer.bytes
        #forcedType: int - soc needs to allow -1 as an int before we can use this
        helpLink: kafka.html
      socket_x_request_x_max_x_bytes:
        description: The maximum bytes allowed for a request to the socket.
        title: socket.request.max.bytes
        forcedType: int
        helpLink: kafka.html
      socket_x_send_x_buffer_x_bytes:
        description: Size, in bytes, of the SO_SNDBUF buffer. A value of -1 will use the OS default.
        title: socket.send.buffer.bytes
        #forcedType: int - soc needs to allow -1 as an int before we can use this
        helpLink: kafka.html
      ssl_x_keystore_x_location:
        description: The key store file location within the Docker container.
        title: ssl.keystore.location
        helpLink: kafka.html
      ssl_x_keystore_x_password:
        description: The key store file password. Invalid for PEM format.
        title: ssl.keystore.password
        sensitive: True
        helpLink: kafka.html
      ssl_x_keystore_x_type:
        description: The key store file format.
        title: ssl.keystore.type
        regex: ^(JKS|PKCS12|PEM)$
        helpLink: kafka.html
      ssl_x_truststore_x_location:
        description: The trust store file location within the Docker container.
        title: ssl.truststore.location
        helpLink: kafka.html
      ssl_x_truststore_x_type:
        description: The trust store file format.
        title: ssl.truststore.type
        helpLink: kafka.html
      ssl_x_truststore_x_password:
        description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
        title: ssl.truststore.password
        sensitive: True
        helpLink: kafka.html
      transaction_x_state_x_log_x_min_x_isr:
        description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees. For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write.
        title: transaction.state.log.min.isr
        forcedType: int
        helpLink: kafka.html
      transaction_x_state_x_log_x_replication_x_factor:
        description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite.
        title: transaction.state.log.replication.factor
        forcedType: int
        helpLink: kafka.html
    client:
      security_x_protocol:
        description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
        title: security.protocol
        regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)
        helpLink: kafka.html
      ssl_x_keystore_x_location:
        description: The key store file location within the Docker container.
        title: ssl.keystore.location
        helpLink: kafka.html
      ssl_x_keystore_x_password:
        description: The key store file password. Invalid for PEM format.
        title: ssl.keystore.password
        sensitive: True
        helpLink: kafka.html
      ssl_x_keystore_x_type:
        description: The key store file format.
        title: ssl.keystore.type
        regex: ^(JKS|PKCS12|PEM)$
        helpLink: kafka.html
      ssl_x_truststore_x_location:
        description: The trust store file location within the Docker container.
        title: ssl.truststore.location
        helpLink: kafka.html
      ssl_x_truststore_x_type:
        description: The trust store file format.
        title: ssl.truststore.type
        helpLink: kafka.html
      ssl_x_truststore_x_password:
        description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
        title: ssl.truststore.password
        sensitive: True
        helpLink: kafka.html
    controller:
      controller_x_listener_x_names:
        description: The listeners used by the controller, in a comma-separated list.
        title: controller.listener.names
        helpLink: kafka.html
      listeners:
        description: The set of URIs to listen on and their listener names, in a comma-separated list.
        helpLink: kafka.html
      listener_x_security_x_protocol_x_map:
        description: Comma-separated mapping of listener names to security protocols.
        title: listener.security.protocol.map
        helpLink: kafka.html
      log_x_dirs:
        description: Where Kafka logs are stored within the Docker container.
        title: log.dirs
        helpLink: kafka.html
      log_x_retention_x_check_x_interval_x_ms:
        description: Frequency at which log files are checked to see whether they qualify for deletion.
        title: log.retention.check.interval.ms
        helpLink: kafka.html
      log_x_retention_x_hours:
        description: How long, in hours, a log file is kept.
        title: log.retention.hours
        forcedType: int
        helpLink: kafka.html
      log_x_segment_x_bytes:
        description: The maximum allowable size for a log file.
        title: log.segment.bytes
        forcedType: int
        helpLink: kafka.html
      process_x_roles:
        description: The role performed by the controller node.
        title: process.roles
        readonly: True
        helpLink: kafka.html
21
salt/kafka/sostatus.sls
Normal file
@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

append_so-kafka_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-kafka
    - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
201
salt/kafka/ssl.sls
Normal file
@@ -0,0 +1,201 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_password = salt['pillar.get']('kafka:config:password') %}

include:
  - ca.dirs
{% set global_ca_server = [] %}
{% set x509dict = salt['mine.get'](GLOBALS.manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if 'manager' in host.split('_')|last or host.split('_')|last == 'standalone' %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set ca_server = global_ca_server[0] %}

{% if GLOBALS.pipeline == "KAFKA" %}

{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone'] %}
kafka_client_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka-client.key
    - keysize: 4096
    - backup: True
    - new: True
{% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%}
    - prereq:
      - x509: /etc/pki/kafka-client.crt
{%- endif %}
    - retry:
        attempts: 5
        interval: 30

kafka_client_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka-client.crt
    - ca_server: {{ ca_server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka-client.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

kafka_client_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-client.key
    - mode: 640
    - user: 960
    - group: 939

kafka_client_crt_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-client.crt
    - mode: 640
    - user: 960
    - group: 939
{% endif %}

{% if GLOBALS.role in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
kafka_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka.key
    - keysize: 4096
    - backup: True
    - new: True
{% if salt['file.file_exists']('/etc/pki/kafka.key') -%}
    - prereq:
      - x509: /etc/pki/kafka.crt
{%- endif %}
    - retry:
        attempts: 5
        interval: 30

kafka_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka.crt
    - ca_server: {{ ca_server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}"
    - onchanges:
      - x509: /etc/pki/kafka.key

kafka_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka.key
    - mode: 640
    - user: 960
    - group: 939

kafka_crt_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka.crt
    - mode: 640
    - user: 960
    - group: 939

kafka_pkcs12_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka.p12
    - mode: 640
    - user: 960
    - group: 939
{% endif %}

# Standalone needs kafka-logstash for automated testing. Searchnode / managersearch need it for logstash to consume from Kafka.
# Manager will have the cert, but it remains unused until a pipeline is created and logstash enabled.
{% if GLOBALS.role in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
kafka_logstash_key:
  x509.private_key_managed:
    - name: /etc/pki/kafka-logstash.key
    - keysize: 4096
    - backup: True
    - new: True
{% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%}
    - prereq:
      - x509: /etc/pki/kafka-logstash.crt
{%- endif %}
    - retry:
        attempts: 5
        interval: 30

kafka_logstash_crt:
  x509.certificate_managed:
    - name: /etc/pki/kafka-logstash.crt
    - ca_server: {{ ca_server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: kafka
    - private_key: /etc/pki/kafka-logstash.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 0
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:{{ kafka_password }}"
    - onchanges:
      - x509: /etc/pki/kafka-logstash.key

kafka_logstash_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-logstash.key
    - mode: 640
    - user: 931
    - group: 939

kafka_logstash_crt_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-logstash.crt
    - mode: 640
    - user: 931
    - group: 939

kafka_logstash_pkcs12_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/kafka-logstash.p12
    - mode: 640
    - user: 931
    - group: 939

{% endif %}
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
30
salt/kafka/storage.sls
Normal file
@@ -0,0 +1,30 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id') %}

{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
kafka_storage_init:
  cmd.run:
    - name: |
        docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /opt/kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /opt/kafka/config/kraft/newserver.properties
kafka_rm_kafkainit:
  cmd.run:
    - name: |
        docker rm so-kafkainit
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
47
salt/kafka/tools/sbin/so-kafka-cli
Normal file
@@ -0,0 +1,47 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

if [ -z "$NOROOT" ]; then
    # Check for prerequisites
    if [ "$(id -u)" -ne 0 ]; then
        echo "This script must be run using sudo!"
        exit 1
    fi
fi

function usage() {
    echo -e "\nUsage: $0 <script> [options]"
    echo ""
    echo "Available scripts:"
    show_available_kafka_cli_tools
}

function show_available_kafka_cli_tools(){
    docker exec so-kafka ls /opt/kafka/bin | grep kafka
}

if [ -z "$1" ]; then
    usage
    exit 1
fi

available_tools=$(show_available_kafka_cli_tools)
script_exists=false

for script in $available_tools; do
    if [ "$script" == "$1" ]; then
        script_exists=true
        break
    fi
done

if [ "$script_exists" == true ]; then
    docker exec so-kafka /opt/kafka/bin/"$1" "${@:2}"
else
    echo -e "\nInvalid script: $1"
    usage
    exit 1
fi
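A usage sketch for the wrapper above. The topic name is hypothetical, the client config path matches the bind mount in enabled.sls, and the bootstrap address assumes the broker port is reachable locally via the container's port bindings:

    # With no arguments the wrapper prints the available Kafka tools
    sudo so-kafka-cli
    # Describe a (hypothetical) topic through the wrapped kafka-topics.sh
    sudo so-kafka-cli kafka-topics.sh --bootstrap-server localhost:9092 \
        --command-config /opt/kafka/config/kraft/client.properties --describe --topic zeek-securityonion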
87
salt/kafka/tools/sbin/so-kafka-config-update
Normal file
@@ -0,0 +1,87 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

if [ -z "$NOROOT" ]; then
    # Check for prerequisites
    if [ "$(id -u)" -ne 0 ]; then
        echo "This script must be run using sudo!"
        exit 1
    fi
fi

usage() {
    cat <<USAGE_EOF

Usage: $0 <operation> [parameters]

Where <operation> is one of the following:

  topic-partitions: Increase the number of partitions for a Kafka topic
      Required arguments: topic-partitions <topic name> <# partitions>
      Example: $0 topic-partitions suricata-topic 6

  list-topics: List Kafka topics
      Example: $0 list-topics

USAGE_EOF
    exit 1
}

if [[ $# -lt 1 || $1 == --help || $1 == -h ]]; then
    usage
fi

kafka_client_config="/opt/kafka/config/kraft/client.properties"

too_few_arguments() {
    echo -e "\nMissing one or more required arguments!\n"
    usage
}

get_kafka_brokers() {
    brokers_cache="/opt/so/state/kafka_brokers"
    broker_port="9092"
    # Rebuild the cache if it is missing or older than two hours
    if [[ ! -f "$brokers_cache" ]] || [[ $(find "$brokers_cache" -mmin +120) ]]; then
        echo "Refreshing Kafka brokers list"
        salt-call pillar.get kafka:nodes --out=json | jq -r --arg broker_port "$broker_port" '.local | to_entries[] | select(.value.role | contains("broker")) | "\(.value.ip):\($broker_port)"' | paste -sd "," - > "$brokers_cache"
    else
        echo "Using cached Kafka brokers list"
    fi
    brokers=$(cat "$brokers_cache")
}

increase_topic_partitions() {
    get_kafka_brokers
    if so-kafka-cli kafka-topics.sh --bootstrap-server "$brokers" --command-config "$kafka_client_config" --alter --topic "$topic" --partitions "$partition_count"; then
        echo -e "Successfully increased the number of partitions for topic $topic to $partition_count\n"
        so-kafka-cli kafka-topics.sh --bootstrap-server "$brokers" --command-config "$kafka_client_config" --describe --topic "$topic"
    fi
}

get_kafka_topics_list() {
    get_kafka_brokers
    so-kafka-cli kafka-topics.sh --bootstrap-server "$brokers" --command-config "$kafka_client_config" --exclude-internal --list | sort
}

operation=$1
case "${operation}" in
    "topic-partitions")
        if [[ $# -lt 3 ]]; then
            too_few_arguments
        fi
        topic=$2
        partition_count=$3
        increase_topic_partitions
        ;;
    "list-topics")
        get_kafka_topics_list
        ;;
    *)
        usage
        ;;
esac
13
salt/kafka/tools/sbin_jinja/so-kafka-trust
Normal file
@@ -0,0 +1,13 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% set TRUSTPASS = salt['pillar.get']('kafka:config:trustpass') %}

if [ ! -f /opt/so/saltstack/local/salt/kafka/files/kafka-truststore ]; then
    docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-kafkatrust --user root --entrypoint /opt/java/openjdk/bin/keytool {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} -import -file /etc/pki/ca.crt -alias SOS -keystore /etc/pki/kafka-truststore -storepass {{ TRUSTPASS }} -storetype jks -noprompt
    docker cp so-kafkatrust:/etc/pki/kafka-truststore /opt/so/saltstack/local/salt/kafka/files/kafka-truststore
    docker rm so-kafkatrust
fi
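To verify the result, the standard keytool listing can be used; expect a trustedCertEntry aliased SOS, with the store password being the kafka:config:trustpass pillar value:

    # Inspect the truststore created by so-kafka-trust
    keytool -list -storetype jks \
        -keystore /opt/so/saltstack/local/salt/kafka/files/kafka-truststore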
@@ -1 +1,2 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.14.3","id": "8.14.3","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
@@ -63,7 +63,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do
  RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
  RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.14.3" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
  echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done
@@ -7,9 +7,7 @@ logstash:
  - search
receiver:
  - receiver
heavynode:
  - manager
  - search
heavynode: []
searchnode:
  - search
manager:
@@ -14,6 +14,11 @@
include:
{% if GLOBALS.role not in ['so-receiver','so-fleet'] %}
  - elasticsearch.ca
{% endif %}
{# Kafka ca runs on nodes that can run logstash for Kafka input / output. Only when Kafka is the global pipeline #}
{% if GLOBALS.role in ['so-searchnode', 'so-manager', 'so-managersearch', 'so-receiver', 'so-standalone'] and GLOBALS.pipeline == 'KAFKA' %}
  - kafka.ca
  - kafka.ssl
{% endif %}
  - logstash.config
  - logstash.sostatus
@@ -75,10 +80,14 @@ so-logstash:
{% else %}
      - /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode'] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
      - /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
      - /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
{% endif %}
{% if GLOBALS.pipeline == "KAFKA" and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
      - /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
      - /opt/so/conf/kafka/kafka-truststore.jks:/etc/pki/kafka-truststore.jks:ro
{% endif %}
{% if GLOBALS.role == 'so-eval' %}
      - /nsm/zeek:/nsm/zeek:ro
      - /nsm/suricata:/suricata:ro

@@ -102,6 +111,9 @@ so-logstash:
      - file: ls_pipeline_{{assigned_pipeline}}_{{CONFIGFILE.split('.')[0] | replace("/","_") }}
{% endfor %}
{% endfor %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
      - file: kafkacertz
{% endif %}
    - require:
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-receiver'] %}
      - x509: etc_filebeat_crt

@@ -115,6 +127,9 @@
      - file: cacertz
      - file: capemz
{% endif %}
{% if GLOBALS.pipeline == 'KAFKA' and GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
      - file: kafkacertz
{% endif %}

delete_so-logstash_so-status.disabled:
  file.uncomment:
@@ -6,24 +6,40 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'logstash/defaults.yaml' as LOGSTASH_DEFAULTS %}
{% set LOGSTASH_MERGED = salt['pillar.get']('logstash', LOGSTASH_DEFAULTS.logstash, merge=True) %}
{% set KAFKA_LOGSTASH = salt['pillar.get']('kafka:logstash', []) %}

{% set REDIS_NODES = [] %}
{# LOGSTASH_NODES is the same as ES_LOGSTASH_NODES from elasticsearch/config.map.jinja but heavynodes are present #}
{# used to store the redis nodes that logstash needs to know about to pull from the queue #}
{% set LOGSTASH_REDIS_NODES = [] %}
{# stores all logstash nodes #}
{% set LOGSTASH_NODES = [] %}
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% set logstash_node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% set redis_node_data = salt['pillar.get']('redis:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}

{% for node_type, node_details in node_data.items() | sort %}
{% for node_type, node_details in redis_node_data.items() | sort %}
{% if GLOBALS.role in ['so-searchnode', 'so-standalone', 'so-managersearch', 'so-fleet'] %}
{% if node_type in ['manager', 'managersearch', 'standalone', 'receiver' ] %}
{% for hostname in node_data[node_type].keys() %}
{% do REDIS_NODES.append({hostname:node_details[hostname].ip}) %}
{% for hostname in redis_node_data[node_type].keys() %}
{% do LOGSTASH_REDIS_NODES.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endif %}
{% else %}
{% do REDIS_NODES.append({GLOBALS.hostname:GLOBALS.node_ip}) %}
{% endif %}
{% endfor %}

{% for hostname in node_data[node_type].keys() %}
{% for node_type, node_details in logstash_node_data.items() | sort %}
{% for hostname in logstash_node_data[node_type].keys() %}
{% do LOGSTASH_NODES.append({hostname:node_details[hostname].ip}) %}
{% endfor %}
{% endfor %}

{# Append Kafka input pipeline when Kafka is enabled #}
{% if GLOBALS.pipeline == 'KAFKA' %}
{% do LOGSTASH_MERGED.defined_pipelines.search.remove('so/0900_input_redis.conf.jinja') %}
{% do LOGSTASH_MERGED.defined_pipelines.search.append('so/0800_input_kafka.conf.jinja') %}
{% do LOGSTASH_MERGED.defined_pipelines.manager.append('so/0800_input_kafka.conf.jinja') %}
{# Disable logstash on manager & receiver nodes unless it has an override configured #}
{% if not KAFKA_LOGSTASH %}
{% if GLOBALS.role in ['so-manager', 'so-receiver'] and GLOBALS.hostname not in KAFKA_LOGSTASH %}
{% do LOGSTASH_MERGED.update({'enabled': False}) %}
{% endif %}
{% endif %}
{% endif %}
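To see what the renamed loops above produce, consider a hypothetical pillar (all hostnames and IPs invented). On a searchnode, LOGSTASH_REDIS_NODES collects the manager- and receiver-class entries from redis:nodes, LOGSTASH_NODES collects every host from logstash:nodes, and any entry in kafka:logstash makes KAFKA_LOGSTASH truthy, so the 'enabled: False' update above is skipped entirely:

# Hypothetical pillar data for the loops above; names and IPs invented.
redis:
  nodes:
    manager:
      so-mgr:
        ip: 10.0.0.10
    receiver:
      so-rcv:
        ip: 10.0.0.20
logstash:
  nodes:
    manager:
      so-mgr:
        ip: 10.0.0.10
    searchnode:
      so-search1:
        ip: 10.0.0.30
kafka:
  logstash:
    - so-mgr   # non-empty list: the disable step above is not reached

With this data, a searchnode would end up with LOGSTASH_REDIS_NODES == [{'so-mgr': '10.0.0.10'}, {'so-rcv': '10.0.0.20'}].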
@@ -0,0 +1,39 @@
{%- set kafka_password = salt['pillar.get']('kafka:config:password') %}
{%- set kafka_trustpass = salt['pillar.get']('kafka:config:trustpass') %}
{%- set kafka_brokers = salt['pillar.get']('kafka:nodes', {}) %}
{%- set brokers = [] %}

{%- if kafka_brokers %}
{%- for key, values in kafka_brokers.items() %}
{%- if 'broker' in values['role'] %}
{%- do brokers.append(key ~ ':9092') %}
{%- endif %}
{%- endfor %}
{%- set bootstrap_servers = ','.join(brokers) %}

input {
  kafka {
    codec => json
    topics_pattern => '.*-securityonion$'
    group_id => 'searchnodes'
    consumer_threads => 3
    client_id => '{{ GLOBALS.hostname }}'
    security_protocol => 'SSL'
    bootstrap_servers => '{{ bootstrap_servers }}'
    ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12'
    ssl_keystore_password => '{{ kafka_password }}'
    ssl_keystore_type => 'PKCS12'
    ssl_truststore_location => '/etc/pki/kafka-truststore.jks'
    ssl_truststore_password => '{{ kafka_trustpass }}'
    decorate_events => true
    tags => [ "elastic-agent", "input-{{ GLOBALS.hostname}}", "kafka" ]
  }
}
filter {
  if ![metadata] {
    mutate {
      rename => { "@metadata" => "metadata" }
    }
  }
}
{% endif %}
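A hypothetical pillar that would satisfy the pillar.get calls in the new template above; with two broker-role nodes, bootstrap_servers renders to a comma-joined host:9092 list:

# Invented example values; the key paths follow the pillar.get calls above.
kafka:
  config:
    password: examplekeystorepass   # unlocks kafka-logstash.p12
    trustpass: exampletrustpass     # unlocks kafka-truststore.jks
  nodes:
    so-kafka1:
      role: broker
    so-kafka2:
      role: broker

With this data, bootstrap_servers renders as so-kafka1:9092,so-kafka2:9092.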
@@ -1,8 +1,8 @@
{%- from 'logstash/map.jinja' import REDIS_NODES with context %}
{%- from 'logstash/map.jinja' import LOGSTASH_REDIS_NODES with context %}
{%- set REDIS_PASS = salt['pillar.get']('redis:config:requirepass') %}

{%- for index in range(REDIS_NODES|length) %}
{%- for host in REDIS_NODES[index] %}
{%- for index in range(LOGSTASH_REDIS_NODES|length) %}
{%- for host in LOGSTASH_REDIS_NODES[index] %}
input {
  redis {
    host => '{{ host }}'
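Beyond the node list, this input's only pillar dependency is the Redis password; each entry in LOGSTASH_REDIS_NODES then expands into its own redis input block. A one-key sketch (value invented):

# Invented value; the key path follows the pillar.get call above.
redis:
  config:
    requirepass: examplepassword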
@@ -3,3 +3,5 @@ manager:
enabled: True
hour: 3
minute: 0
additionalCA: ''
insecureSkipVerify: False
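Since additionalCA is declared multiline further down in this diff, a populated override would carry a full PEM block. A hypothetical pillar sketch (the intermediate nesting from defaults.yaml is abbreviated and the certificate body is elided):

# Hypothetical override; only the two new keys are shown.
manager:
  additionalCA: |
    -----BEGIN CERTIFICATE-----
    ...PEM-encoded proxy or internal CA...
    -----END CERTIFICATE-----
  insecureSkipVerify: False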
@@ -73,6 +73,15 @@ manager_sbin:
    - exclude_pat:
      - "*_test.py"

manager_sbin_jinja:
  file.recurse:
    - name: /usr/sbin/
    - source: salt://manager/tools/sbin_jinja/
    - user: socore
    - group: socore
    - file_mode: 755
    - template: jinja

so-repo-file:
  file.managed:
    - name: /opt/so/conf/reposync/repodownload.conf
salt/manager/map.jinja (new file, 7 lines)
@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{% import_yaml 'manager/defaults.yaml' as MANAGERDEFAULTS %}
{% set MANAGERMERGED = salt['pillar.get']('manager', MANAGERDEFAULTS.manager, merge=True) %}
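Because pillar.get is called with merge=True, a pillar only needs to name the keys it changes; everything else falls through from defaults.yaml. A hypothetical example:

# With merge=True, this single override...
manager:
  insecureSkipVerify: True
# ...yields MANAGERMERGED.insecureSkipVerify == True while hour, minute,
# additionalCA, and the other defaults keep their defaults.yaml values.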
@@ -24,3 +24,16 @@ manager:
    description: Proxy server to use for updates.
    global: True
    helpLink: proxy.html
  additionalCA:
    description: Additional CA certificates to trust in PEM format.
    global: True
    advanced: True
    multiline: True
    forcedType: string
    helpLink: proxy.html
  insecureSkipVerify:
    description: Disable TLS verification for outgoing requests. This will make your installation less secure to MITM attacks. Recommended only for debugging purposes.
    advanced: True
    forcedType: bool
    global: True
    helpLink: proxy.html
Some files were not shown because too many files have changed in this diff.