Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)

Compare commits: 2.4.0-2023...2.4.2-2023 (593 commits)
@@ -1,6 +1,6 @@
## Security Onion 2.4
## Security Onion 2.4 Beta 3

Security Onion 2.4 is here!
Security Onion 2.4 Beta 3 is here!

## Screenshots
@@ -1,47 +1,47 @@
### 2.3.120-20220425 ISO image built on 2022/04/25
### 2.4.2-20230531 ISO image built on 2023/05/31

### Download and Verify

2.3.120-20220425 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.120-20220425.iso
2.4.2-20230531 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.2-20230531.iso

MD5: C99729E452B064C471BEF04532F28556
SHA1: 60BF07D5347C24568C7B793BFA9792E98479CFBF
SHA256: CD17D0D7CABE21D45FA45E1CF91C5F24EB9608C79FF88480134E5592AFDD696E
MD5: EB861EFB7F7DA6FB418075B4C452E4EB
SHA1: 479A72DBB0633CB23608122F7200A24E2C3C3128
SHA256: B69C1AE4C576BBBC37F4B87C2A8379903421E65B2C4F24C90FABB0EAD6F0471B

Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.120-20220425.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.2-20230531.iso.sig

Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS

For example, here are the steps you can use on most Linux distributions to download and verify our Security Onion ISO image.

Download and import the signing key:
```
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -O - | gpg --import -
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS -O - | gpg --import -
```

Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.120-20220425.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.2-20230531.iso.sig
```

Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.120-20220425.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.2-20230531.iso
```

Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.120-20220425.iso.sig securityonion-2.3.120-20220425.iso
gpg --verify securityonion-2.4.2-20230531.iso.sig securityonion-2.4.2-20230531.iso
```

The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Mon 25 Apr 2022 08:20:40 AM EDT using RSA key ID FE507013
gpg: Signature made Wed 31 May 2023 05:01:41 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
@@ -49,4 +49,4 @@ Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
```

Once you've verified the ISO image, you're ready to proceed to our Installation guide:
https://docs.securityonion.net/en/2.3/installation.html
https://docs.securityonion.net/en/2.4/installation.html
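For convenience, the documented steps can be chained into a single script. This is only a sketch assembled from the commands above; the URLs, filename, and SHA256 value are copied verbatim from this page, and the `sha256sum` step is an extra check using the published checksum rather than part of the official procedure.

```
#!/bin/bash
# Sketch: download and verify the Security Onion 2.4.2 ISO using the steps documented above.
set -euo pipefail

ISO=securityonion-2.4.2-20230531.iso

# Import the signing key
wget -qO - https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS | gpg --import -

# Download the signature file and the ISO
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/${ISO}.sig
wget https://download.securityonion.net/file/securityonion/${ISO}

# Verify the signature; gpg exits non-zero if the signature is bad
gpg --verify ${ISO}.sig ${ISO}

# Optional extra check against the SHA256 published above
echo "B69C1AE4C576BBBC37F4B87C2A8379903421E65B2C4F24C90FABB0EAD6F0471B  ${ISO}" | sha256sum -c -
```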
@@ -1,42 +0,0 @@
logstash:
  pipelines:
    helix:
      config:
        - so/0010_input_hhbeats.conf
        - so/1033_preprocess_snort.conf
        - so/1100_preprocess_bro_conn.conf
        - so/1101_preprocess_bro_dhcp.conf
        - so/1102_preprocess_bro_dns.conf
        - so/1103_preprocess_bro_dpd.conf
        - so/1104_preprocess_bro_files.conf
        - so/1105_preprocess_bro_ftp.conf
        - so/1106_preprocess_bro_http.conf
        - so/1107_preprocess_bro_irc.conf
        - so/1108_preprocess_bro_kerberos.conf
        - so/1109_preprocess_bro_notice.conf
        - so/1110_preprocess_bro_rdp.conf
        - so/1111_preprocess_bro_signatures.conf
        - so/1112_preprocess_bro_smtp.conf
        - so/1113_preprocess_bro_snmp.conf
        - so/1114_preprocess_bro_software.conf
        - so/1115_preprocess_bro_ssh.conf
        - so/1116_preprocess_bro_ssl.conf
        - so/1117_preprocess_bro_syslog.conf
        - so/1118_preprocess_bro_tunnel.conf
        - so/1119_preprocess_bro_weird.conf
        - so/1121_preprocess_bro_mysql.conf
        - so/1122_preprocess_bro_socks.conf
        - so/1123_preprocess_bro_x509.conf
        - so/1124_preprocess_bro_intel.conf
        - so/1125_preprocess_bro_modbus.conf
        - so/1126_preprocess_bro_sip.conf
        - so/1127_preprocess_bro_radius.conf
        - so/1128_preprocess_bro_pe.conf
        - so/1129_preprocess_bro_rfb.conf
        - so/1130_preprocess_bro_dnp3.conf
        - so/1131_preprocess_bro_smb_files.conf
        - so/1132_preprocess_bro_smb_mapping.conf
        - so/1133_preprocess_bro_ntlm.conf
        - so/1134_preprocess_bro_dce_rpc.conf
        - so/8001_postprocess_common_ip_augmentation.conf
        - so/9997_output_helix.conf.jinja
@@ -4,6 +4,7 @@ logstash:
    - 0.0.0.0:3765:3765
    - 0.0.0.0:5044:5044
    - 0.0.0.0:5055:5055
    - 0.0.0.0:5056:5056
    - 0.0.0.0:5644:5644
    - 0.0.0.0:6050:6050
    - 0.0.0.0:6051:6051
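This hunk adds 0.0.0.0:5055:5055 to the Logstash port bindings. Once the change is applied, a quick sketch like the following (using `ss` from iproute2) can confirm that each published port is actually listening on the host; the port list is copied from the pillar above and should be adjusted if you have overridden it.

```
#!/bin/bash
# Sketch: check that the Logstash listeners published in the pillar are bound on the host.
for port in 3765 5044 5055 5056 5644 6050 6051; do
  if ss -lnt "( sport = :$port )" | grep -q LISTEN; then
    echo "port $port: listening"
  else
    echo "port $port: NOT listening"
  fi
done
```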
@@ -1,8 +0,0 @@
logstash:
  pipelines:
    manager:
      config:
        - so/0011_input_endgame.conf
        - so/0012_input_elastic_agent.conf
        - so/9999_output_redis.conf.jinja
@@ -1,8 +0,0 @@
logstash:
  pipelines:
    receiver:
      config:
        - so/0011_input_endgame.conf
        - so/0012_input_elastic_agent.conf
        - so/9999_output_redis.conf.jinja
@@ -1,7 +0,0 @@
logstash:
  pipelines:
    search:
      config:
        - so/0900_input_redis.conf.jinja
        - so/9805_output_elastic_agent.conf.jinja
        - so/9900_output_endgame.conf.jinja
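The manager, receiver, and search pipeline pillar files above are removed in this compare, along with the helix pipeline earlier. To see what Logstash pipeline configuration a given node actually renders after the change, the pillar can be compiled locally on the minion. This is a sketch using standard Salt commands; the `logstash:pipelines` key path is taken from the files shown above.

```
# Render the compiled pillar on a node and inspect the Logstash pipeline assignment.
# salt-call ships with the Salt minion installed on every Security Onion node.
sudo salt-call pillar.get logstash:pipelines --out=yaml

# Or dump everything under the logstash key to compare against the SLS files in this diff
sudo salt-call pillar.get logstash --out=yaml
```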
202 pillar/top.sls
@@ -1,44 +1,26 @@
base:
  '*':
    - patch.needs_restarting
    - ntp.soc_ntp
    - ntp.adv_ntp
    - logrotate
    - global.soc_global
    - global.adv_global
    - docker.soc_docker
    - docker.adv_docker
    - firewall.soc_firewall
    - firewall.adv_firewall
    - influxdb.token
    - logrotate.soc_logrotate
    - logrotate.adv_logrotate
    - nginx.soc_nginx
    - nginx.adv_nginx
    - node_data.ips
    - ntp.soc_ntp
    - ntp.adv_ntp
    - patch.needs_restarting
    - patch.soc_patch
    - patch.adv_patch
    - sensoroni.soc_sensoroni
    - sensoroni.adv_sensoroni
    - telegraf.soc_telegraf
    - telegraf.adv_telegraf
    - influxdb.token
    - node_data.ips

  '* and not *_eval and not *_import':
    - logstash.nodes

  '*_eval or *_heavynode or *_sensor or *_standalone or *_import':
    - match: compound
    - zeek
    - bpf.soc_bpf
    - bpf.adv_bpf

  '*_managersearch or *_heavynode':
    - match: compound
    - logstash
    - logstash.manager
    - logstash.search
    - logstash.soc_logstash
    - logstash.adv_logstash
    - elasticsearch.index_templates
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch

  '*_manager':
    - logstash
    - logstash.manager
    - logstash.soc_logstash
    - logstash.adv_logstash
    - elasticsearch.index_templates

  '*_manager or *_managersearch':
    - match: compound
@@ -49,14 +31,19 @@ base:
    - kibana.secrets
    {% endif %}
    - secrets
    - soc_global
    - adv_global
    - manager.soc_manager
    - manager.adv_manager
    - idstools.soc_idstools
    - idstools.adv_idstools
    - logstash.nodes
    - logstash.soc_logstash
    - logstash.adv_logstash
    - soc.soc_soc
    - soc.adv_soc
    - soctopus.soc_soctopus
    - soctopus.adv_soctopus
    - kibana.soc_kibana
    - kibana.adv_kibana
    - kratos.soc_kratos
    - kratos.adv_kratos
    - redis.soc_redis
@@ -65,17 +52,31 @@ base:
    - influxdb.adv_influxdb
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    - elasticfleet.soc_elasticfleet
    - elasticfleet.adv_elasticfleet
    - elastalert.soc_elastalert
    - elastalert.adv_elastalert
    - backup.soc_backup
    - backup.adv_backup
    - firewall.soc_firewall
    - firewall.adv_firewall
    - curator.soc_curator
    - curator.adv_curator
    - soctopus.soc_soctopus
    - soctopus.adv_soctopus
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

  '*_sensor':
    - healthcheck.sensor
    - soc_global
    - adv_global
    - strelka.soc_strelka
    - strelka.adv_strelka
    - zeek.soc_zeek
    - zeek.adv_zeek
    - bpf.soc_bpf
    - bpf.adv_bpf
    - pcap.soc_pcap
    - pcap.adv_pcap
    - suricata.soc_suricata
    - suricata.adv_suricata
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

@@ -89,15 +90,27 @@ base:
    {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
    - kibana.secrets
    {% endif %}
    - soc_global
    - kratos.soc_kratos
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    - elasticfleet.soc_elasticfleet
    - elasticfleet.adv_elasticfleet
    - elastalert.soc_elastalert
    - elastalert.adv_elastalert
    - manager.soc_manager
    - manager.adv_manager
    - idstools.soc_idstools
    - idstools.adv_idstools
    - soc.soc_soc
    - soc.adv_soc
    - soctopus.soc_soctopus
    - soctopus.adv_soctopus
    - kibana.soc_kibana
    - kibana.adv_kibana
    - strelka.soc_strelka
    - strelka.adv_strelka
    - curator.soc_curator
    - curator.adv_curator
    - kratos.soc_kratos
    - kratos.adv_kratos
    - redis.soc_redis
@@ -106,15 +119,19 @@ base:
    - influxdb.adv_influxdb
    - backup.soc_backup
    - backup.adv_backup
    - firewall.soc_firewall
    - firewall.adv_firewall
    - zeek.soc_zeek
    - zeek.adv_zeek
    - bpf.soc_bpf
    - bpf.adv_bpf
    - pcap.soc_pcap
    - pcap.adv_pcap
    - suricata.soc_suricata
    - suricata.adv_suricata
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

  '*_standalone':
    - logstash
    - logstash.manager
    - logstash.search
    - logstash.nodes
    - logstash.soc_logstash
    - logstash.adv_logstash
    - elasticsearch.index_templates
@@ -126,7 +143,6 @@ base:
    {% endif %}
    - secrets
    - healthcheck.standalone
    - soc_global
    - idstools.soc_idstools
    - idstools.adv_idstools
    - kratos.soc_kratos
@@ -137,51 +153,81 @@ base:
    - influxdb.adv_influxdb
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    - elasticfleet.soc_elasticfleet
    - elasticfleet.adv_elasticfleet
    - elastalert.soc_elastalert
    - elastalert.adv_elastalert
    - manager.soc_manager
    - manager.adv_manager
    - soc.soc_soc
    - soc.adv_soc
    - soctopus.soc_soctopus
    - soctopus.adv_soctopus
    - kibana.soc_kibana
    - kibana.adv_kibana
    - strelka.soc_strelka
    - strelka.adv_strelka
    - curator.soc_curator
    - curator.adv_curator
    - backup.soc_backup
    - backup.adv_backup
    - firewall.soc_firewall
    - firewall.adv_firewall
    - zeek.soc_zeek
    - zeek.adv_zeek
    - bpf.soc_bpf
    - bpf.adv_bpf
    - pcap.soc_pcap
    - pcap.adv_pcap
    - suricata.soc_suricata
    - suricata.adv_suricata
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

  '*_heavynode':
    - elasticsearch.auth
    - soc_global
    - logstash.nodes
    - logstash.soc_logstash
    - logstash.adv_logstash
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    - curator.soc_curator
    - curator.adv_curator
    - redis.soc_redis
    - redis.adv_redis
    - zeek.soc_zeek
    - zeek.adv_zeek
    - bpf.soc_bpf
    - bpf.adv_bpf
    - pcap.soc_pcap
    - pcap.adv_pcap
    - suricata.soc_suricata
    - suricata.adv_suricata
    - strelka.soc_strelka
    - strelka.adv_strelka
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

  '*_idh':
    - soc_global
    - adv_global
    - idh.soc_idh
    - idh.adv_idh
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

  '*_searchnode':
    - logstash
    - logstash.search
    - logstash.nodes
    - logstash.soc_logstash
    - logstash.adv_logstash
    - elasticsearch.index_templates
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
    - elasticsearch.auth
    {% endif %}
    - redis.soc_redis
    - soc_global
    - adv_global
    - redis.adv_redis
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

  '*_receiver':
    - logstash
    - logstash.receiver
    - logstash.nodes
    - logstash.soc_logstash
    - logstash.adv_logstash
    {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
@@ -189,8 +235,6 @@ base:
    {% endif %}
    - redis.soc_redis
    - redis.adv_redis
    - soc_global
    - adv_global
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

@@ -206,11 +250,20 @@ base:
    - kratos.soc_kratos
    - elasticsearch.soc_elasticsearch
    - elasticsearch.adv_elasticsearch
    - elasticfleet.soc_elasticfleet
    - elasticfleet.adv_elasticfleet
    - elastalert.soc_elastalert
    - elastalert.adv_elastalert
    - manager.soc_manager
    - manager.adv_manager
    - soc.soc_soc
    - soc_global
    - adv_global
    - soc.adv_soc
    - soctopus.soc_soctopus
    - soctopus.adv_soctopus
    - kibana.soc_kibana
    - kibana.adv_kibana
    - curator.soc_curator
    - curator.adv_curator
    - backup.soc_backup
    - backup.adv_backup
    - kratos.soc_kratos
@@ -219,11 +272,30 @@ base:
    - redis.adv_redis
    - influxdb.soc_influxdb
    - influxdb.adv_influxdb
    - firewall.soc_firewall
    - firewall.adv_firewall
    - zeek.soc_zeek
    - zeek.adv_zeek
    - bpf.soc_bpf
    - bpf.adv_bpf
    - pcap.soc_pcap
    - pcap.adv_pcap
    - suricata.soc_suricata
    - suricata.adv_suricata
    - strelka.soc_strelka
    - strelka.adv_strelka
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

  '*_workstation':
  '*_fleet':
    - backup.soc_backup
    - backup.adv_backup
    - logstash.nodes
    - logstash.soc_logstash
    - logstash.adv_logstash
    - elasticfleet.soc_elasticfleet
    - elasticfleet.adv_elasticfleet
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}

  '*_desktop':
    - minions.{{ grains.id }}
    - minions.adv_{{ grains.id }}
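pillar/top.sls maps minion ID patterns such as `*_sensor` or `*_standalone` to the pillar SLS files they receive. After editing it, a quick sanity check is to compile the pillar on the minion itself; the commands below are standard Salt and are shown only as a sketch.

```
# Show the minion ID (the *_manager / *_sensor / *_standalone suffix is what top.sls matches on)
sudo salt-call grains.get id

# Compile and print the full pillar this minion receives from the top file above
sudo salt-call pillar.items --out=yaml | less

# Spot-check a key that should only exist on certain roles, e.g. zeek settings on sensors
sudo salt-call pillar.get zeek --out=yaml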
@@ -3,16 +3,6 @@
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% set ZEEKVER = salt['pillar.get']('global:mdengine', '') %}
{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{% set ELASTALERT = salt['pillar.get']('elastalert:enabled', True) %}
{% set ELASTICSEARCH = salt['pillar.get']('elasticsearch:enabled', True) %}
{% set KIBANA = salt['pillar.get']('kibana:enabled', True) %}
{% set LOGSTASH = salt['pillar.get']('logstash:enabled', True) %}
{% set CURATOR = salt['pillar.get']('curator:enabled', True) %}
{% set REDIS = salt['pillar.get']('redis:enabled', True) %}
{% set STRELKA = salt['pillar.get']('strelka:enabled', '0') %}
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
{% set saltversion = saltversion.salt.minion.version %}
@@ -35,6 +25,7 @@
  'soc',
  'kratos',
  'elasticfleet',
  'elastic-fleet-package-registry',
  'firewall',
  'idstools',
  'suricata.manager',
@@ -105,7 +96,8 @@
  'schedule',
  'tcpreplay',
  'docker_clean',
  'elasticfleet'
  'elasticfleet',
  'elastic-fleet-package-registry'
],
'so-manager': [
  'salt.master',
@@ -119,6 +111,7 @@
  'soc',
  'kratos',
  'elasticfleet',
  'elastic-fleet-package-registry',
  'firewall',
  'idstools',
  'suricata.manager',
@@ -137,6 +130,7 @@
  'influxdb',
  'soc',
  'kratos',
  'elastic-fleet-package-registry',
  'elasticfleet',
  'firewall',
  'manager',
@@ -166,6 +160,7 @@
  'influxdb',
  'soc',
  'kratos',
  'elastic-fleet-package-registry',
  'elasticfleet',
  'firewall',
  'idstools',
@@ -191,6 +186,16 @@
  'tcpreplay',
  'docker_clean'
],
'so-fleet': [
  'ssl',
  'telegraf',
  'firewall',
  'logstash',
  'healthcheck',
  'schedule',
  'elasticfleet',
  'docker_clean'
],
'so-receiver': [
  'ssl',
  'telegraf',
@@ -202,27 +207,27 @@
],
}, grain='role') %}

{% if (PLAYBOOK != 0) and grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
{% if grains.role in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone'] %}
{% do allowed_states.append('mysql') %}
{% endif %}

{%- if ZEEKVER != 'SURICATA' and grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
{% do allowed_states.append('zeek') %}
{%- endif %}

{% if STRELKA and grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
{% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
{% do allowed_states.append('strelka') %}
{% endif %}

{% if ELASTICSEARCH and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
{% do allowed_states.append('elasticsearch') %}
{% endif %}

{% if ELASTICSEARCH and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
{% do allowed_states.append('elasticsearch.auth') %}
{% endif %}

{% if KIBANA and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
{% do allowed_states.append('kibana') %}
{% do allowed_states.append('kibana.secrets') %}
{% endif %}
@@ -231,23 +236,19 @@
{% do allowed_states.append('curator') %}
{% endif %}

{% if ELASTALERT and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
{% do allowed_states.append('elastalert') %}
{% endif %}

{% if (PLAYBOOK !=0) and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
{% do allowed_states.append('playbook') %}
{% endif %}

{% if (PLAYBOOK !=0) and grains.role in ['so-eval'] %}
{% do allowed_states.append('redis') %}
{% endif %}

{% if LOGSTASH and grains.role in ['so-helixsensor', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
{% if grains.role in ['so-helixsensor', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
{% do allowed_states.append('logstash') %}
{% endif %}

{% if REDIS and grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
{% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
{% do allowed_states.append('redis') %}
{% endif %}
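This top file builds an allowed_states list per role and then gates individual states (zeek, strelka, elasticsearch, kibana, elastalert, logstash, redis) on the role alone, where it previously also consulted pillar toggles such as PLAYBOOK, STRELKA, or ELASTICSEARCH. To confirm which states a node will actually run after the change, you can ask the minion for its compiled top data; the commands below are standard Salt and only a sketch.

```
# List the states this minion's role will apply, as computed from the state top file
sudo salt-call state.show_top

# Dry-run the highstate to see what would change without applying anything
sudo salt-call state.highstate test=True
```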
@@ -25,6 +25,7 @@ config_backup_script:
|
||||
so_config_backup:
|
||||
cron.present:
|
||||
- name: /usr/sbin/so-config-backup > /dev/null 2>&1
|
||||
- identifier: so_config_backup
|
||||
- user: root
|
||||
- minute: '1'
|
||||
- hour: '0'
|
||||
|
||||
@@ -18,7 +18,7 @@ include:
|
||||
pki_private_key:
|
||||
x509.private_key_managed:
|
||||
- name: /etc/pki/ca.key
|
||||
- bits: 4096
|
||||
- keysize: 4096
|
||||
- passphrase:
|
||||
- cipher: aes_256_cbc
|
||||
- backup: True
|
||||
@@ -39,7 +39,7 @@ pki_public_ca_crt:
|
||||
- keyUsage: "critical cRLSign, keyCertSign"
|
||||
- extendedkeyUsage: "serverAuth, clientAuth"
|
||||
- subjectKeyIdentifier: hash
|
||||
- authorityKeyIdentifier: keyid,issuer:always
|
||||
- authorityKeyIdentifier: keyid:always, issuer
|
||||
- days_valid: 3650
|
||||
- days_remaining: 0
|
||||
- backup: True
|
||||
|
||||
@@ -49,13 +49,12 @@ so-status.conf:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- unless: ls /opt/so/conf/so-status/so-status.conf
|
||||
|
||||
sosaltstackperms:
|
||||
socore_opso_perms:
|
||||
file.directory:
|
||||
- name: /opt/so/saltstack
|
||||
- name: /opt/so
|
||||
- user: 939
|
||||
- group: 939
|
||||
- dir_mode: 770
|
||||
|
||||
|
||||
so_log_perms:
|
||||
file.directory:
|
||||
- name: /opt/so/log
|
||||
@@ -97,6 +96,8 @@ alwaysupdated:
|
||||
Etc/UTC:
|
||||
timezone.system
|
||||
|
||||
# Sync curl configuration for Elasticsearch authentication
|
||||
{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
|
||||
elastic_curl_config:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elasticsearch/curl.config
|
||||
@@ -108,22 +109,25 @@ elastic_curl_config:
|
||||
- require:
|
||||
- file: elastic_curl_config_distributed
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
# Sync some Utilities
|
||||
utilsyncscripts:
|
||||
|
||||
common_sbin:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- user: root
|
||||
- group: root
|
||||
- source: salt://common/tools/sbin
|
||||
- user: 939
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
|
||||
common_sbin_jinja:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://common/tools/sbin_jinja
|
||||
- user: 939
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
- template: jinja
|
||||
- source: salt://common/tools/sbin
|
||||
- exclude_pat:
|
||||
- so-common
|
||||
- so-firewall
|
||||
- so-image-common
|
||||
- soup
|
||||
- so-status
|
||||
|
||||
so-status_script:
|
||||
file.managed:
|
||||
@@ -133,8 +137,10 @@ so-status_script:
|
||||
|
||||
{% if GLOBALS.role in GLOBALS.sensor_roles %}
|
||||
# Add sensor cleanup
|
||||
/usr/sbin/so-sensor-clean:
|
||||
so-sensor-clean:
|
||||
cron.present:
|
||||
- name: /usr/sbin/so-sensor-clean
|
||||
- identifier: so-sensor-clean
|
||||
- user: root
|
||||
- minute: '*'
|
||||
- hour: '*'
|
||||
@@ -154,8 +160,10 @@ sensorrotateconf:
|
||||
- source: salt://common/files/sensor-rotate.conf
|
||||
- mode: 644
|
||||
|
||||
/usr/local/bin/sensor-rotate:
|
||||
sensor-rotate:
|
||||
cron.present:
|
||||
- name: /usr/local/bin/sensor-rotate
|
||||
- identifier: sensor-rotate
|
||||
- user: root
|
||||
- minute: '1'
|
||||
- hour: '0'
|
||||
@@ -178,8 +186,10 @@ commonlogrotateconf:
|
||||
- template: jinja
|
||||
- mode: 644
|
||||
|
||||
/usr/local/bin/common-rotate:
|
||||
common-rotate:
|
||||
cron.present:
|
||||
- name: /usr/local/bin/common-rotate
|
||||
- identifier: common-rotate
|
||||
- user: root
|
||||
- minute: '1'
|
||||
- hour: '0'
|
||||
@@ -200,17 +210,11 @@ sostatus_log:
|
||||
- name: /opt/so/log/sostatus/status.log
|
||||
- mode: 644
|
||||
|
||||
common_pip_dependencies:
|
||||
pip.installed:
|
||||
- user: root
|
||||
- pkgs:
|
||||
- rich
|
||||
- target: /usr/lib64/python3.6/site-packages
|
||||
|
||||
# Install sostatus check cron
|
||||
sostatus_check_cron:
|
||||
# Install sostatus check cron. This is used to populate Grid.
|
||||
so-status_check_cron:
|
||||
cron.present:
|
||||
- name: '/usr/sbin/so-status -j > /opt/so/log/sostatus/status.log 2>&1'
|
||||
- identifier: so-status_check_cron
|
||||
- user: root
|
||||
- minute: '*/1'
|
||||
- hour: '*'
|
||||
@@ -220,7 +224,7 @@ sostatus_check_cron:
|
||||
|
||||
remove_post_setup_cron:
|
||||
cron.absent:
|
||||
- name: 'salt-call state.highstate'
|
||||
- name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
|
||||
- identifier: post_setup_cron
|
||||
|
||||
{% if GLOBALS.role not in ['eval', 'manager', 'managersearch', 'standalone'] %}
|
||||
@@ -234,7 +238,7 @@ soversionfile:
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.so_model %}
|
||||
{% if GLOBALS.so_model and GLOBALS.so_model not in ['SO2AMI01', 'SO2AZI01', 'SO2GCI01'] %}
|
||||
{% if GLOBALS.os == 'Rocky' %}
|
||||
# Install Raid tools
|
||||
raidpkgs:
|
||||
@@ -246,9 +250,10 @@ raidpkgs:
|
||||
{% endif %}
|
||||
|
||||
# Install raid check cron
|
||||
so_raid_status:
|
||||
so-raid-status:
|
||||
cron.present:
|
||||
- name: '/usr/sbin/so-raid-status > /dev/null 2>&1'
|
||||
- identifier: so-raid-status
|
||||
- user: root
|
||||
- minute: '*/15'
|
||||
- hour: '*'
|
||||
|
||||
@@ -5,28 +5,37 @@ commonpkgs:
|
||||
pkg.installed:
|
||||
- skip_suggestions: True
|
||||
- pkgs:
|
||||
- chrony
|
||||
- apache2-utils
|
||||
- wget
|
||||
- ntpdate
|
||||
- jq
|
||||
- python3-docker
|
||||
- curl
|
||||
- ca-certificates
|
||||
- software-properties-common
|
||||
- apt-transport-https
|
||||
- openssl
|
||||
- netcat
|
||||
- python3-mysqldb
|
||||
- sqlite3
|
||||
- libssl-dev
|
||||
- python3-dateutil
|
||||
- python3-m2crypto
|
||||
- python3-mysqldb
|
||||
- python3-packaging
|
||||
- python3-watchdog
|
||||
- python3-lxml
|
||||
- git
|
||||
- vim
|
||||
|
||||
# since Ubuntu requires an internet connection, we can use pip to install modules
|
||||
python3-pip:
|
||||
pkg.installed
|
||||
|
||||
python-rich:
|
||||
pip.installed:
|
||||
- name: rich
|
||||
- target: /usr/local/lib/python3.8/dist-packages/
|
||||
- require:
|
||||
- pkg: python3-pip
|
||||
|
||||
|
||||
{% elif GLOBALS.os == 'Rocky' %}
|
||||
commonpkgs:
|
||||
pkg.installed:
|
||||
@@ -51,6 +60,8 @@ commonpkgs:
|
||||
- python3-m2crypto
|
||||
- rsync
|
||||
- python3-rich
|
||||
- python3-pyyaml
|
||||
- python3-watchdog
|
||||
- python3-packaging
|
||||
- unzip
|
||||
{% endif %}
|
||||
|
||||
@@ -54,33 +54,37 @@ add_interface_bond0() {
|
||||
ethtool -K "$BNIC" $i off &>/dev/null
|
||||
fi
|
||||
done
|
||||
# Check if the bond slave connection has already been created
|
||||
nmcli -f name,uuid -p con | grep -q "bond0-slave-$BNIC"
|
||||
local found_int=$?
|
||||
|
||||
if [[ $found_int != 0 ]]; then
|
||||
# Create the slave interface and assign it to the bond
|
||||
nmcli con add type ethernet ifname "$BNIC" con-name "bond0-slave-$BNIC" master bond0 -- \
|
||||
ethernet.mtu "$MTU" \
|
||||
connection.autoconnect "yes"
|
||||
else
|
||||
local int_uuid
|
||||
int_uuid=$(nmcli -f name,uuid -p con | sed -n "s/bond0-slave-$BNIC //p" | tr -d ' ')
|
||||
if ! [[ $is_cloud ]]; then
|
||||
# Check if the bond slave connection has already been created
|
||||
nmcli -f name,uuid -p con | grep -q "bond0-slave-$BNIC"
|
||||
local found_int=$?
|
||||
|
||||
nmcli con mod "$int_uuid" \
|
||||
ethernet.mtu "$MTU" \
|
||||
connection.autoconnect "yes"
|
||||
fi
|
||||
if [[ $found_int != 0 ]]; then
|
||||
# Create the slave interface and assign it to the bond
|
||||
nmcli con add type ethernet ifname "$BNIC" con-name "bond0-slave-$BNIC" master bond0 -- \
|
||||
ethernet.mtu "$MTU" \
|
||||
connection.autoconnect "yes"
|
||||
else
|
||||
local int_uuid
|
||||
int_uuid=$(nmcli -f name,uuid -p con | sed -n "s/bond0-slave-$BNIC //p" | tr -d ' ')
|
||||
|
||||
nmcli con mod "$int_uuid" \
|
||||
ethernet.mtu "$MTU" \
|
||||
connection.autoconnect "yes"
|
||||
fi
|
||||
fi
|
||||
|
||||
ip link set dev "$BNIC" arp off multicast off allmulticast off promisc on
|
||||
|
||||
# Bring the slave interface up
|
||||
if [[ $verbose == true ]]; then
|
||||
nmcli con up "bond0-slave-$BNIC"
|
||||
else
|
||||
nmcli con up "bond0-slave-$BNIC" &>/dev/null
|
||||
|
||||
if ! [[ $is_cloud ]]; then
|
||||
# Bring the slave interface up
|
||||
if [[ $verbose == true ]]; then
|
||||
nmcli con up "bond0-slave-$BNIC"
|
||||
else
|
||||
nmcli con up "bond0-slave-$BNIC" &>/dev/null
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$nic_error" != 0 ]; then
|
||||
return "$nic_error"
|
||||
fi
|
||||
@@ -201,7 +205,7 @@ gpg_rpm_import() {
|
||||
local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/rocky/keys"
|
||||
fi
|
||||
|
||||
RPMKEYS=('RPM-GPG-KEY-EPEL-9' 'SALTSTACK-GPG-KEY2.pub' 'docker.pub' 'securityonion.pub')
|
||||
RPMKEYS=('RPM-GPG-KEY-rockyofficial' 'RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
|
||||
|
||||
for RPMKEY in "${RPMKEYS[@]}"; do
|
||||
rpm --import $RPMKEYSLOC/$RPMKEY
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||
# this file except in compliance with the Elastic License 2.0.
|
||||
|
||||
#so-elastic-agent-gen-installers $FleetHost $EnrollmentToken
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints")) | .api_key')
|
||||
|
||||
FLEETHOST=$(lookup_pillar "server:url" "elasticfleet")
|
||||
|
||||
#FLEETHOST=$1
|
||||
#ENROLLMENTOKEN=$2
|
||||
CONTAINERGOOS=( "linux" "darwin" "windows" )
|
||||
|
||||
rm -rf /tmp/elastic-agent-workspace
|
||||
mkdir -p /tmp/elastic-agent-workspace
|
||||
|
||||
for OS in "${CONTAINERGOOS[@]}"
|
||||
do
|
||||
printf "\n\nGenerating $OS Installer..."
|
||||
cp /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/so-elastic-agent-*-$OS-x86_64.tar.gz /tmp/elastic-agent-workspace/$OS.tar.gz
|
||||
docker run -e CGO_ENABLED=0 -e GOOS=$OS \
|
||||
--mount type=bind,source=/etc/ssl/certs/,target=/workspace/files/cert/ \
|
||||
--mount type=bind,source=/tmp/elastic-agent-workspace/,target=/workspace/files/elastic-agent/ \
|
||||
--mount type=bind,source=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/,target=/output/ \
|
||||
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} go build -ldflags "-X main.fleetHost=$FLEETHOST -X main.enrollmentToken=$ENROLLMENTOKEN" -o /output/so-elastic-agent_$OS
|
||||
printf "\n $OS Installer Generated..."
|
||||
done
|
||||
@@ -1,94 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{%- set NODEIP = salt['pillar.get']('host:mainip', '') %}
|
||||
. /usr/sbin/so-common
|
||||
|
||||
SKIP=0
|
||||
#########################################
|
||||
# Options
|
||||
#########################################
|
||||
usage()
|
||||
{
|
||||
cat <<EOF
|
||||
Security Onion Elastic Clear
|
||||
Options:
|
||||
-h This message
|
||||
-y Skip interactive mode
|
||||
EOF
|
||||
}
|
||||
while getopts "h:y" OPTION
|
||||
do
|
||||
case $OPTION in
|
||||
h)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
|
||||
y)
|
||||
SKIP=1
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
done
|
||||
if [ $SKIP -ne 1 ]; then
|
||||
# List indices
|
||||
echo
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -k -L https://{{ NODEIP }}:9200/_cat/indices?v
|
||||
echo
|
||||
# Inform user we are about to delete all data
|
||||
echo
|
||||
echo "This script will delete all data (documents, indices, etc.) in the Elasticsearch database."
|
||||
echo
|
||||
echo "If you would like to proceed, please type "AGREE" and hit ENTER."
|
||||
echo
|
||||
# Read user input
|
||||
read INPUT
|
||||
if [ "$INPUT" != "AGREE" ] ; then exit 0; fi
|
||||
fi
|
||||
|
||||
# Check to see if Logstash and Elastalert are running
|
||||
LS_ENABLED=$(so-status | grep logstash)
|
||||
EA_ENABLED=$(so-status | grep elastalert)
|
||||
|
||||
if [ ! -z "$LS_ENABLED" ]; then
|
||||
|
||||
/usr/sbin/so-logstash-stop
|
||||
|
||||
fi
|
||||
|
||||
if [ ! -z "$EA_ENABLED" ]; then
|
||||
|
||||
/usr/sbin/so-elastalert-stop
|
||||
|
||||
fi
|
||||
|
||||
# Delete data
|
||||
echo "Deleting data..."
|
||||
|
||||
INDXS=$(curl -K /opt/so/conf/elasticsearch/curl.config -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
|
||||
for INDX in ${INDXS}
|
||||
do
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
|
||||
done
|
||||
|
||||
#Start Logstash
|
||||
if [ ! -z "$LS_ENABLED" ]; then
|
||||
|
||||
/usr/sbin/so-logstash-start
|
||||
|
||||
fi
|
||||
|
||||
if [ ! -z "$EA_ENABLED" ]; then
|
||||
|
||||
/usr/sbin/so-elastalert-start
|
||||
|
||||
fi
|
||||
|
||||
@@ -1,137 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
{%- set ZEEKVER = salt['pillar.get']('global:mdengine', '') %}
|
||||
{%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %}
|
||||
{%- set RITAENABLED = salt['pillar.get']('rita:enabled', False) %}
|
||||
|
||||
wait_for_web_response "http://localhost:5601/api/spaces/space/default" "default" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
|
||||
|
||||
# Let's snag a cookie from Kibana
|
||||
SESSIONCOOKIE=$(curl -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
|
||||
|
||||
# Disable certain Features from showing up in the Kibana UI
|
||||
echo
|
||||
echo "Disable certain Features from showing up in the Kibana UI"
|
||||
so-kibana-space-defaults
|
||||
echo
|
||||
|
||||
# Suricata logs
|
||||
echo
|
||||
echo "Setting up Suricata package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "suricata-logs", "name": "suricata-logs", "description": "Suricata integration", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/suricata/eve*.json" ], "data_stream.dataset": "suricata", "tags": [],"processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata", "custom": "pipeline: suricata.common" }}}}}}'
|
||||
echo
|
||||
|
||||
# Zeek logs
|
||||
echo
|
||||
echo "Setting up Zeek package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "name": "zeek-logs", "description": "Zeek logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/zeek/logs/current/*.log"], "data_stream.dataset": "zeek", "tags": [], "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", "custom": "exclude_files: [\"broker|capture_loss|ecat_arp_info|loaded_scripts|packet_filter|stats|stderr|stdout.log$\"]\n" } } } } } }'
|
||||
echo
|
||||
|
||||
|
||||
# Import - EVTX
|
||||
echo
|
||||
echo "Setting up EVTX import package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "package": { "name": "log", "version": "1.1.0" }, "name": "import-evtx-logs", "namespace": "so", "description": "Import Windows EVTX logs", "policy_id": "so-grid-nodes", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/import/*/evtx/data.json" ], "data_stream.dataset": "import", "custom": "pipeline: import.wel", "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: windows_eventlog\n imported: true", "tags": [] } } } } } }'
|
||||
echo
|
||||
|
||||
# Import - Suricata logs
|
||||
echo
|
||||
echo "Setting up Suricata import package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "import-suricata-logs", "name": "import-suricata-logs", "description": "Import Suricata logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/import/*/suricata/eve*.json"], "data_stream.dataset": "import", "tags": [], "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"", "custom": "pipeline: suricata.common" } } } } } }'
|
||||
echo
|
||||
|
||||
# Import - Zeek logs
|
||||
echo
|
||||
echo "Setting up Zeek import package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "name": "import-zeek-logs", "description": "Zeek Import logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": ["/nsm/import/*/zeek/logs/*.log"], "data_stream.dataset": "import", "tags": [], "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"", "custom": "exclude_files: [\"broker|capture_loss|ecat_arp_info|loaded_scripts|packet_filter|stats|stderr|stdout.log$\"]\n" } } } } } }'
|
||||
echo
|
||||
|
||||
# Strelka logs
|
||||
echo
|
||||
echo "Setting up Strelka package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "strelka-logs", "name": "strelka-logs", "description": "Strelka logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/strelka/log/strelka.log" ], "data_stream.dataset": "strelka", "tags": [],"processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka", "custom": "pipeline: strelka.file" }}}}}}'
|
||||
echo
|
||||
|
||||
# Syslog TCP Port 514
|
||||
echo
|
||||
echo "Setting up Syslog TCP package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "policy_id": "so-grid-nodes", "package": { "name": "tcp", "version": "1.5.0" }, "id": "syslog-tcp-514", "name": "syslog-tcp-514", "description": "Syslog Over TCP Port 514", "namespace": "so", "inputs": { "tcp-tcp": { "enabled": true, "streams": { "tcp.generic": { "enabled": true, "vars": { "listen_address": "0.0.0.0", "listen_port": "514", "data_stream.dataset": "syslog", "pipeline": "syslog", "processors": "- add_fields:\n target: event\n fields:\n module: syslog", "tags": [ "syslog" ], "syslog_options": "field: message\n#format: auto\n#timezone: Local" } } } } } }'
|
||||
echo
|
||||
|
||||
# Syslog UDP Port 514
|
||||
echo
|
||||
echo "Setting up Syslog UDP package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{ "policy_id": "so-grid-nodes", "package": { "name": "udp", "version": "1.5.0" }, "id": "syslog-udp-514", "name": "syslog-udp-514", "description": "Syslog over UDP Port 514", "namespace": "so", "inputs": { "udp-udp": { "enabled": true, "streams": { "udp.generic": { "enabled": true, "vars": { "listen_address": "0.0.0.0", "listen_port": "514", "data_stream.dataset": "syslog", "pipeline": "syslog", "max_message_size": "10KiB", "keep_null": false, "processors": "- add_fields:\n target: event\n fields: \n module: syslog\n", "tags": [ "syslog" ], "syslog_options": "field: message\n#format: auto\n#timezone: Local" } } } } } }'
|
||||
echo
|
||||
|
||||
# Kratos logs
|
||||
echo
|
||||
echo "Setting up Kratos package policy..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "kratos-logs", "name": "kratos-logs", "description": "Kratos logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kratos/kratos.log" ], "data_stream.dataset": "kratos", "tags": [],"custom":"pipeline: kratos","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos" }}}}}}'
echo

# RITA Logs
#echo
#echo "Setting up RITA package policy..."
#curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "log", "version": "1.1.0" }, "id": "rita-logs", "name": "rita-logs", "description": "RITA Beacon logs", "namespace": "so", "inputs": { "logs-logfile": { "enabled": true, "streams": { "log.log": { "enabled": true, "vars": { "paths": [ "/nsm/rita/beacons.csv", "/nsm/rita/long-connections.csv", "/nsm/rita/short-connections.csv", "/nsm/rita/exploded-dns.csv" ], "data_stream.dataset": "rita", "tags": [], "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: rita\n- if:\n log.file.path: beacons.csv\n then: \n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: rita.beacon\n- if:\n regexp:\n log.file.path: \"*connections.csv\"\n then: \n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: rita.connection\n- if:\n log.file.path: \"exploded-dns.csv\"\n then: \n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: rita.dns" }}}}}}'
#echo

# Elasticsearch logs
echo
echo "Setting up Elasticsearch package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "elasticsearch", "version": "1.0.0" }, "id": "elasticsearch-logs", "name": "elasticsearch-logs", "description": "Elasticsearch Logs", "namespace": "default", "inputs": { "elasticsearch-logfile": { "enabled": true, "streams": { "elasticsearch.audit": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_audit.json" ] } }, "elasticsearch.deprecation": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_deprecation.json" ] } }, "elasticsearch.gc": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/gc.log.[0-9]*", "/var/log/elasticsearch/gc.log" ] } }, "elasticsearch.server": { "enabled": true, "vars": { "paths": [ "/opt/so/log/elasticsearch/*.log" ] } }, "elasticsearch.slowlog": { "enabled": false, "vars": { "paths": [ "/var/log/elasticsearch/*_index_search_slowlog.json", "/var/log/elasticsearch/*_index_indexing_slowlog.json" ] } } } }, "elasticsearch-elasticsearch/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:9200" ], "scope": "node" }, "streams": { "elasticsearch.stack_monitoring.ccr": { "enabled": false }, "elasticsearch.stack_monitoring.cluster_stats": { "enabled": false }, "elasticsearch.stack_monitoring.enrich": { "enabled": false }, "elasticsearch.stack_monitoring.index": { "enabled": false }, "elasticsearch.stack_monitoring.index_recovery": { "enabled": false, "vars": { "active.only": true } }, "elasticsearch.stack_monitoring.index_summary": { "enabled": false }, "elasticsearch.stack_monitoring.ml_job": { "enabled": false }, "elasticsearch.stack_monitoring.node": { "enabled": false }, "elasticsearch.stack_monitoring.node_stats": { "enabled": false }, "elasticsearch.stack_monitoring.pending_tasks": { "enabled": false }, "elasticsearch.stack_monitoring.shard": { "enabled": false } } } } }'
echo

# Logstash logs
#echo
#echo "Setting up Logstash package policy..."
#curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "logstash", "version": "2.0.0" }, "id": "logstash-logs", "name": "logstash-logs", "description": "Logstash logs", "namespace": "default", "inputs": { "logstash-logfile": { "enabled": true, "streams": { "logstash.log": { "enabled": true, "vars": { "paths": [ "/opt/so/logs/logstash/logstash.log" ] } }, "logstash.slowlog": { "enabled": false, "vars": { "paths": [ "/var/log/logstash/logstash-slowlog-plain*.log", "/var/log/logstash/logstash-slowlog-json*.log" ] } } } }, "logstash-logstash/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:9600" ], "period": "10s" }, "streams": { "logstash.stack_monitoring.node": { "enabled": false }, "logstash.stack_monitoring.node_stats": { "enabled": false } } } } }'
#echo

# Kibana logs
#echo
#echo "Setting up Kibana package policy..."
#curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "kibana", "version": "2.0.0" }, "id": "kibana-logs", "name": "kibana-logs", "description": "Kibana logs", "namespace": "default", "inputs": { "kibana-logfile": { "enabled": true, "streams": { "kibana.audit": { "enabled": false, "vars": { "paths": [ "/opt/so/log/kibana/kibana.log" ] } }, "kibana.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/kibana/kibana.log" ] } } } }, "kibana-kibana/metrics": { "enabled": false, "vars": { "hosts": [ "http://localhost:5601" ] }, "streams": { "kibana.stack_monitoring.cluster_actions": { "enabled": false }, "kibana.stack_monitoring.cluster_rules": { "enabled": false }, "kibana.stack_monitoring.node_actions": { "enabled": false }, "kibana.stack_monitoring.node_rules": { "enabled": false }, "kibana.stack_monitoring.stats": { "enabled": false }, "kibana.stack_monitoring.status": { "enabled": false } } } } }'
#echo

# Redis logs
echo
echo "Setting up Redis package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{ "policy_id": "so-grid-nodes", "package": { "name": "redis", "version": "1.4.0" }, "id": "redis-logs", "name": "redis-logs", "description": "Redis logs", "namespace": "default", "inputs": { "redis-logfile": { "enabled": true, "streams": { "redis.log": { "enabled": true, "vars": { "paths": [ "/opt/so/log/redis/redis.log" ], "tags": [ "redis-log" ], "preserve_original_event": false } } } }, "redis-redis": { "enabled": false, "streams": { "redis.slowlog": { "enabled": false, "vars": { "hosts": [ "127.0.0.1:6379" ], "password": "" } } } }, "redis-redis/metrics": { "enabled": false, "vars": { "hosts": [ "127.0.0.1:6379" ], "idle_timeout": "20s", "maxconn": 10, "network": "tcp", "password": "" }, "streams": { "redis.info": { "enabled": false, "vars": { "period": "10s" } }, "redis.key": { "enabled": false, "vars": { "key.patterns": "- limit: 20\n pattern: '*'\n", "period": "10s" } }, "redis.keyspace": { "enabled": false, "vars": { "period": "10s" } } } } } }'
echo

# IDH logs
echo
echo "Setting up IDH package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"policy_id":"so-grid-nodes","package":{"name":"log","version":"1.1.1"},"id":"idh-logs","name":"idh-logs","namespace":"so","description":"IDH integration","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/nsm/idh/opencanary.log"],"data_stream.dataset":"idh","custom":"pipeline: common","processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '\''[\"prospector\", \"input\", \"offset\", \"beat\"]'\''\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary","tags":[]}}}}}}'
echo

# SOC - Server logs
echo
echo "Setting up SOC - Server Logs package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-server-logs","namespace":"so","description":"Security Onion Console Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/sensoroni-server.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true","tags":[]}}}}}}'
echo

# SOC - Sensoroni logs
echo
echo "Setting up SOC - Sensoroni Logs package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-sensoroni-logs","namespace":"so","description":"Security Onion - Sensoroni - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/sensoroni/sensoroni.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true","tags":[]}}}}}}'
echo

# SOC - Elastic Auth Sync logs
echo
echo "Setting up SOC - Elastic Auth Sync Logs package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-auth-sync-logs","namespace":"so","description":"Security Onion - Elastic Auth Sync - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/sync.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync","tags":[]}}}}}}'
echo

# SOC - Salt Relay logs
echo
echo "Setting up SOC - Salt_Relay Logs package policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d'{"package":{"name":"log","version":"1.1.2"},"name":"soc-salt-relay-logs","namespace":"so","description":"Security Onion - Salt Relay - Logs","policy_id":"so-grid-nodes","inputs":{"logs-logfile":{"enabled":true,"streams":{"log.log":{"enabled":true,"vars":{"paths":["/opt/so/log/soc/salt-relay.log"],"data_stream.dataset":"soc","custom":"pipeline: common","processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay","tags":[]}}}}}}'
echo
@@ -1,109 +0,0 @@
|
||||
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||
# this file except in compliance with the Elastic License 2.0.
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
|
||||
# Create ES Token
|
||||
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
|
||||
printf "ESTOKEN = $ESTOKEN \n"
|
||||
|
||||
# Add SO-Manager Fleet URL
|
||||
## This array replaces whatever URLs are currently configured
|
||||
printf "\n"
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/settings" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"fleet_server_hosts":["https://{{ GLOBALS.manager_ip }}:8220"]}'
|
||||
printf "\n\n"
|
||||
|
||||
# Configure certificates
|
||||
mkdir -p /opt/so/conf/elastic-fleet/certs
|
||||
cp /etc/ssl/certs/intca.crt /opt/so/conf/elastic-fleet/certs
|
||||
cp /etc/pki/elasticfleet* /opt/so/conf/elastic-fleet/certs
|
||||
|
||||
{% if grains.role in ['so-import', 'so-standalone', 'so-eval', 'so-manager', 'so-managersearch'] %}
|
||||
# Add SO-Manager Elasticsearch Output
|
||||
ESCACRT=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/intca.crt)
|
||||
JSON_STRING=$( jq -n \
|
||||
--arg ESCACRT "$ESCACRT" \
|
||||
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
||||
printf "\n\n"
|
||||
|
||||
{% else %}
|
||||
# Create Logstash Output payload
|
||||
LOGSTASHCRT=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/elasticfleet.crt)
|
||||
LOGSTASHKEY=$(openssl rsa -in /opt/so/conf/elastic-fleet/certs/elasticfleet.key)
|
||||
LOGSTASHCA=$(openssl x509 -in /opt/so/conf/elastic-fleet/certs/intca.crt)
|
||||
JSON_STRING=$( jq -n \
|
||||
--arg LOGSTASHCRT "$LOGSTASHCRT" \
|
||||
--arg LOGSTASHKEY "$LOGSTASHKEY" \
|
||||
--arg LOGSTASHCA "$LOGSTASHCA" \
|
||||
'{"name":"so-manager_logstash","id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}'
|
||||
)
|
||||
|
||||
# Add SO-Manager Logstash Output
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
||||
printf "\n\n"
|
||||
{%- endif %}
|
||||
|
||||
# Add Elastic Fleet Integrations
|
||||
|
||||
# Add Elastic Fleet Server Agent Policy
|
||||
#curl -vv -K /opt/so/conf/elasticsearch/curl.config -L \
|
||||
#-X POST "localhost:5601/api/fleet/agent_policies" \
|
||||
#-H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
|
||||
#-d '{"name":"SO-Manager","id":"so-manager","description":"SO Manager Fleet Server Policy","namespace":"default","monitoring_enabled":["logs"],"has_fleet_server":true}'
|
||||
|
||||
# Add Agent Policy - SOS Grid Nodes
|
||||
#curl -vv -K /opt/so/conf/elasticsearch/curl.config -L \
|
||||
#-X POST "localhost:5601/api/fleet/agent_policies" \
|
||||
#-H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
|
||||
#-d '{"name":"SO-Grid","id":"so-grid","description":"SO Grid Endpoint Policy","namespace":"default","monitoring_enabled":["logs"]}'
|
||||
|
||||
# Add Agent Policy - Default endpoints
|
||||
#curl -vv -K /opt/so/conf/elasticsearch/curl.config -L \
|
||||
#-X POST "localhost:5601/api/fleet/agent_policies" \
|
||||
#-H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
|
||||
#-d '{"name":"Endpoints-Initalization","id":"endpoints","description":"Initial Endpoint Policy","namespace":"default","monitoring_enabled":["logs"]}'
|
||||
|
||||
ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-default")) | .api_key')
|
||||
GRIDNODESENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes")) | .api_key')
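# Illustrative only: the jq filters above pull the api_key for the matching policy from a
# response shaped roughly like {"list":[{"policy_id":"so-grid-nodes","api_key":"..."}, ...]}.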
|
||||
|
||||
# Store needed data in minion pillar
|
||||
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
|
||||
printf '%s\n'\
|
||||
"elasticfleet:"\
|
||||
" server:"\
|
||||
" es_token: '$ESTOKEN'"\
|
||||
" endpoints_enrollment: '$ENDPOINTSENROLLMENTOKEN'"\
|
||||
" grid_enrollment: '$GRIDNODESENROLLMENTOKEN'"\
|
||||
" url: '{{ GLOBALS.manager_ip }}'"\
|
||||
"" >> "$pillar_file"
|
||||
|
||||
#Store Grid Nodes Enrollment token in Global pillar
|
||||
global_pillar_file=/opt/so/saltstack/local/pillar/soc_global.sls
|
||||
printf '%s\n'\
|
||||
" fleet_grid_enrollment_token: '$GRIDNODESENROLLMENTOKEN'"\
|
||||
"" >> "$global_pillar_file"
|
||||
|
||||
# Call Elastic-Fleet Salt State
|
||||
salt-call state.apply elasticfleet queue=True
|
||||
|
||||
# Load Elastic Fleet integrations
|
||||
/usr/sbin/so-elastic-fleet-integration-policy-load
|
||||
|
||||
# Temp
|
||||
wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.6.2/so-elastic-agent-8.6.2-darwin-x86_64.tar.gz
|
||||
wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.6.2/so-elastic-agent-8.6.2-linux-x86_64.tar.gz
|
||||
wget --progress=bar:force:noscroll -P /opt/so/saltstack/default/salt/elasticfleet/files/elastic-agent/ https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/so_elastic-agent-8.6.2/so-elastic-agent-8.6.2-windows-x86_64.tar.gz
|
||||
|
||||
#git clone -b 2.4-so-elastic-agent https://github.com/Security-Onion-Solutions/securityonion-image.git
|
||||
#cd securityonion-image/so-elastic-agent-builder
|
||||
#docker build -t so-elastic-agent-builder .
|
||||
|
||||
so-elastic-agent-gen-installers
|
||||
salt-call state.apply elasticfleet.install_agent_grid queue=True
|
||||
@@ -1,31 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-import']%}
|
||||
/usr/sbin/so-restart elasticsearch $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
|
||||
/usr/sbin/so-restart kibana $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
|
||||
/usr/sbin/so-restart logstash $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
|
||||
/usr/sbin/so-restart curator $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
|
||||
/usr/sbin/so-restart elastalert $1
|
||||
{%- endif %}
|
||||
@@ -1,31 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-import']%}
|
||||
/usr/sbin/so-start elasticsearch $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
|
||||
/usr/sbin/so-start kibana $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
|
||||
/usr/sbin/so-start logstash $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
|
||||
/usr/sbin/so-start curator $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
|
||||
/usr/sbin/so-start elastalert $1
|
||||
{%- endif %}
|
||||
@@ -1,31 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode', 'so-import']%}
|
||||
/usr/sbin/so-stop elasticsearch $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
|
||||
/usr/sbin/so-stop kibana $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
|
||||
/usr/sbin/so-stop logstash $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-searchnode']%}
|
||||
/usr/sbin/so-stop curator $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
|
||||
/usr/sbin/so-stop elastalert $1
|
||||
{%- endif %}
|
||||
@@ -1,15 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
#
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }}
|
||||
ESPORT=9200
|
||||
|
||||
echo "Removing read only attributes for indices..."
|
||||
echo
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -XPUT -H "Content-Type: application/json" -L https://$IP:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running."; fi
|
||||
@@ -1,104 +0,0 @@
|
||||
#!/usr/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [[ $# -lt 1 ]]; then
|
||||
echo "Usage: $0 --role=<ROLE> --ip=<IP ADDRESS> --apply=<true|false>"
|
||||
echo ""
|
||||
echo " Example: so-firewall --role=sensor --ip=192.168.254.100 --apply=true"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for i in "$@"; do
|
||||
case $i in
|
||||
-r=*|--role=*)
|
||||
ROLE="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-i=*|--ip=*)
|
||||
IP="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-a=*|--apply*)
|
||||
APPLY="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-*|--*)
|
||||
echo "Unknown option $i"
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
ROLE=${ROLE,,}
|
||||
APPLY=${APPLY,,}
|
||||
|
||||
function rolecall() {
|
||||
THEROLE=$1
|
||||
THEROLES="analyst analyst_workstations beats_endpoint beats_endpoint_ssl elastic_agent_endpoint elasticsearch_rest endgame eval heavynodes idh manager managersearch receivers searchnodes sensors standalone strelka_frontend syslog"
|
||||
|
||||
for AROLE in $THEROLES; do
|
||||
if [ "$AROLE" = "$THEROLE" ]; then
|
||||
return 0
|
||||
fi
|
||||
done
|
||||
return 1
|
||||
}
|
||||
|
||||
# Make sure the required options are specified
|
||||
if [ -z "$ROLE" ]; then
|
||||
echo "Please specify a role with --role="
|
||||
exit 1
|
||||
fi
|
||||
if [ -z "$IP" ]; then
|
||||
echo "Please specify an IP address with --ip="
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Are we dealing with a role that this script supports?
|
||||
if rolecall "$ROLE"; then
|
||||
echo "$ROLE is a supported role"
|
||||
else
|
||||
echo "This is not a supported role"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Are we dealing with an IP?
|
||||
if verify_ip4 "$IP"; then
|
||||
echo "$IP is a valid IP or CIDR"
|
||||
else
|
||||
echo "$IP is not a valid IP or CIDR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
local_salt_dir=/opt/so/saltstack/local/salt/firewall
|
||||
|
||||
# Let's see if the file exists and if it does, let's see if the IP exists.
|
||||
if [ -f "$local_salt_dir/hostgroups/$ROLE" ]; then
|
||||
if grep -q $IP "$local_salt_dir/hostgroups/$ROLE"; then
|
||||
echo "Host already exists"
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
|
||||
# If you have reached this part of your quest then let's add the IP
|
||||
echo "Adding $IP to the $ROLE role"
|
||||
echo "$IP" >> $local_salt_dir/hostgroups/$ROLE
|
||||
|
||||
# Check to see if we are applying this right away.
|
||||
if [ "$APPLY" = "true" ]; then
|
||||
echo "Applying the firewall rules"
|
||||
salt-call state.apply firewall queue=True
|
||||
echo "Firewall rules have been applied... Review logs further if there were errors."
|
||||
echo ""
|
||||
else
|
||||
echo "Firewall rules will be applied next salt run"
|
||||
fi
|
||||
@@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
local_salt_dir=/opt/so/saltstack/local
|
||||
|
||||
got_root() {
|
||||
|
||||
# Make sure you are root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "This script must be run using sudo!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
got_root
|
||||
if [ ! -f $local_salt_dir/pillar/fireeye/init.sls ]; then
|
||||
echo "This is nto configured for Helix Mode. Please re-install."
|
||||
exit
|
||||
else
|
||||
echo "Enter your Helix API Key: "
|
||||
read APIKEY
|
||||
sed -i "s/^ api_key.*/ api_key: $APIKEY/g" $local_salt_dir/pillar/fireeye/init.sls
|
||||
docker stop so-logstash
|
||||
docker rm so-logstash
|
||||
echo "Restarting Logstash for updated key"
|
||||
salt-call state.apply logstash queue=True
|
||||
fi
|
||||
@@ -38,6 +38,7 @@ container_list() {
|
||||
"so-zeek"
|
||||
"so-elastic-agent"
|
||||
"so-elastic-agent-builder"
|
||||
"so-elastic-fleet-package-registry"
|
||||
)
|
||||
elif [ $MANAGERCHECK != 'so-helix' ]; then
|
||||
TRUSTED_CONTAINERS=(
|
||||
@@ -45,6 +46,7 @@ container_list() {
|
||||
"so-elastalert"
|
||||
"so-elastic-agent"
|
||||
"so-elastic-agent-builder"
|
||||
"so-elastic-fleet-package-registry"
|
||||
"so-elasticsearch"
|
||||
"so-idh"
|
||||
"so-idstools"
|
||||
|
||||
@@ -1,17 +0,0 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% set MAININT = salt['pillar.get']('host:mainint') -%}
|
||||
{% set NODEIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] -%}
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [ "$1" == "" ]; then
|
||||
for i in $(curl -s -L http://{{ NODEIP }}:9600/_node/stats | jq .pipelines | jq '. | to_entries | .[].key' | sed 's/\"//g'); do echo ${i^}:; curl -s localhost:9600/_node/stats | jq .pipelines.$i.events; done
|
||||
else
|
||||
curl -s -L http://{{ NODEIP }}:9600/_node/stats | jq .pipelines.$1.events
|
||||
fi
|
||||
@@ -1,304 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
if [ -f /usr/sbin/so-common ]; then
|
||||
. /usr/sbin/so-common
|
||||
fi
|
||||
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "This script must be run using sudo!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ $# -lt 1 ]]; then
|
||||
echo "Usage: $0 -o=<operation> -m=[id]"
|
||||
echo ""
|
||||
echo " where <operation> is one of the following:"
|
||||
echo ""
|
||||
echo " list: Lists all keys with hashes"
|
||||
echo " accept: Accepts a new key and adds the minion files"
|
||||
echo " delete: Removes the key and deletes the minion files"
|
||||
echo " reject: Rejects a key"
|
||||
echo " test: Perform minion test"
|
||||
echo ""
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for i in "$@"; do
|
||||
case $i in
|
||||
-o=*|--operation=*)
|
||||
OPERATION="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-m=*|--minionid=*)
|
||||
MINION_ID="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-e=*|--esheap=*)
|
||||
ES_HEAP_SIZE="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-n=*|--mgmtnic=*)
|
||||
MNIC="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-d=*|--description=*)
|
||||
NODE_DESCRIPTION="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-a=*|--monitor=*)
|
||||
INTERFACE="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-i=*|--ip=*)
|
||||
MAINIP="${i#*=}"
|
||||
shift
|
||||
;;
|
||||
-*|--*)
|
||||
echo "Unknown option $i"
|
||||
exit 1
|
||||
;;
|
||||
*)
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
PILLARFILE=/opt/so/saltstack/local/pillar/minions/$MINION_ID.sls
|
||||
ADVPILLARFILE=/opt/so/saltstack/local/pillar/minions/adv_$MINION_ID.sls
|
||||
|
||||
function getinstallinfo() {
|
||||
# Pull from file
|
||||
INSTALLVARS=$(sudo salt "$MINION_ID" cp.get_file_str /opt/so/install.txt --out=newline_values_only)
|
||||
source <(echo $INSTALLVARS)
|
||||
}
|
||||
|
||||
function testminion() {
|
||||
# Always run on the host, since this is going to be the manager of a distributed grid, or an eval/standalone.
|
||||
# Distributed managers must run this in order for the sensor nodes to have access to the so-tcpreplay image.
|
||||
so-test
|
||||
result=$?
|
||||
|
||||
# If this so-minion script is not running on the given minion ID, run so-test remotely on the sensor as well
|
||||
local_id=$(lookup_grain id)
|
||||
if [[ ! "$local_id" =~ "${MINION_ID}_" ]]; then
|
||||
salt "$MINION_ID" cmd.run 'so-test'
|
||||
result=$?
|
||||
fi
|
||||
|
||||
exit $result
|
||||
}
|
||||
|
||||
function listminions() {
|
||||
salt-key list -F --out=json
|
||||
exit $?
|
||||
}
|
||||
|
||||
function rejectminion() {
|
||||
salt-key -y -r $MINION_ID
|
||||
exit $?
|
||||
}
|
||||
|
||||
function acceptminion() {
|
||||
salt-key -y -a $MINION_ID
|
||||
}
|
||||
|
||||
function deleteminion() {
|
||||
salt-key -y -d $MINION_ID
|
||||
}
|
||||
|
||||
function deleteminionfiles () {
|
||||
rm -f $PILLARFILE
|
||||
rm -f $ADVPILLARFILE
|
||||
}
|
||||
|
||||
# Create the minion file
|
||||
function create_minion_files() {
|
||||
mkdir -p /opt/so/saltstack/local/pillar/minions
|
||||
touch $ADVPILLARFILE
|
||||
if [ -f "$PILLARFILE" ]; then
|
||||
rm $PILLARFILE
|
||||
fi
|
||||
}
|
||||
|
||||
# Add Elastic settings to the minion file
|
||||
function add_elastic_to_minion() {
|
||||
printf '%s\n'\
|
||||
"elasticsearch:"\
|
||||
" esheap: '$ES_HEAP_SIZE'"\
|
||||
" " >> $PILLARFILE
|
||||
}
|
||||
|
||||
# Add IDH Services info to the minion file
|
||||
function add_idh_to_minion() {
|
||||
printf '%s\n'\
|
||||
"idh:"\
|
||||
" restrict_management_ip: $IDH_MGTRESTRICT"\
|
||||
" services:" >> "$PILLARFILE"
|
||||
IFS=',' read -ra IDH_SERVICES_ARRAY <<< "$IDH_SERVICES"
|
||||
for service in ${IDH_SERVICES_ARRAY[@]}; do
|
||||
echo " - $service" | tr '[:upper:]' '[:lower:]' | tr -d '"' >> "$PILLARFILE"
|
||||
done
|
||||
}
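# Example (hypothetical values): with IDH_SERVICES='SSH,FTP' and IDH_MGTRESTRICT=True,
# the function above appends roughly:
#   idh:
#     restrict_management_ip: True
#     services:
#       - ssh
#       - ftp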
|
||||
|
||||
function add_logstash_to_minion() {
|
||||
# Create the logstash advanced pillar
|
||||
printf '%s\n'\
|
||||
"logstash_settings:"\
|
||||
" ls_host: '$LSHOSTNAME'"\
|
||||
" ls_pipeline_batch_size: 125"\
|
||||
" ls_input_threads: 1"\
|
||||
" lsheap: $LSHEAP"\
|
||||
" ls_pipeline_workers: $CPUCORES"\
|
||||
" " >> $PILLARFILE
|
||||
}
|
||||
|
||||
# Analyst Workstation
|
||||
function add_analyst_to_minion() {
|
||||
printf '%s\n'\
|
||||
"host:"\
|
||||
" mainint: '$MNIC'"\
|
||||
"workstation:"\
|
||||
" gui:"\
|
||||
" enabled: true"\
|
||||
"sensoroni:"\
|
||||
" node_description: '${NODE_DESCRIPTION//\'/''}'" >> $PILLARFILE
|
||||
}
|
||||
|
||||
# Add basic host info to the minion file
|
||||
function add_host_to_minion() {
|
||||
printf '%s\n'\
|
||||
"host:"\
|
||||
" mainip: '$MAINIP'"\
|
||||
" mainint: '$MNIC'" >> $PILLARFILE
|
||||
}
|
||||
|
||||
# Add sensoroni-specific information - Can we pull node_address from the host pillar?
|
||||
function add_sensoroni_to_minion() {
|
||||
|
||||
printf '%s\n'\
|
||||
"sensoroni:"\
|
||||
" node_description: '${NODE_DESCRIPTION//\'/''}'"\
|
||||
" " >> $PILLARFILE
|
||||
}
|
||||
|
||||
# Sensor settings for the minion pillar
|
||||
function add_sensor_to_minion() {
|
||||
echo "sensor:" >> $PILLARFILE
|
||||
echo " interface: '$INTERFACE'" >> $PILLARFILE
|
||||
echo " mtu: 9000" >> $PILLARFILE
|
||||
echo "zeek:" >> $PILLARFILE
|
||||
echo " config:" >> $PILLARFILE
|
||||
echo " node:" >> $PILLARFILE
|
||||
echo " lb_procs: '$CORECOUNT'" >> $PILLARFILE
|
||||
echo "suricata:" >> $PILLARFILE
|
||||
echo " config:" >> $PILLARFILE
|
||||
echo " af-packet:" >> $PILLARFILE
|
||||
echo " threads: '$CORECOUNT'" >> $PILLARFILE
|
||||
echo "pcap:" >> $PILLARFILE
|
||||
echo " enabled: True" >> $PILLARFILE
|
||||
}
|
||||
|
||||
function updateMine() {
|
||||
salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC"
|
||||
}
|
||||
function apply_ES_state() {
|
||||
salt-call state.apply elasticsearch concurrent=True
|
||||
}
|
||||
function createEVAL() {
|
||||
add_elastic_to_minion
|
||||
add_logstash_to_minion
|
||||
add_sensor_to_minion
|
||||
}
|
||||
|
||||
function createIDH() {
|
||||
add_idh_to_minion
|
||||
}
|
||||
|
||||
function createIMPORT() {
|
||||
add_elastic_to_minion
|
||||
add_logstash_to_minion
|
||||
add_sensor_to_minion
|
||||
}
|
||||
|
||||
function createHEAVYNODE() {
|
||||
add_elastic_to_minion
|
||||
add_logstash_to_minion
|
||||
add_sensor_to_minion
|
||||
}
|
||||
|
||||
function createMANAGER() {
|
||||
add_elastic_to_minion
|
||||
add_logstash_to_minion
|
||||
}
|
||||
|
||||
function createMANAGERSEARCH() {
|
||||
add_elastic_to_minion
|
||||
add_logstash_to_minion
|
||||
}
|
||||
|
||||
function createSENSOR() {
|
||||
add_sensor_to_minion
|
||||
}
|
||||
|
||||
function createSEARCHNODE() {
|
||||
add_elastic_to_minion
|
||||
add_logstash_to_minion
|
||||
updateMine
|
||||
apply_ES_state
|
||||
}
|
||||
|
||||
function createSTANDALONE() {
|
||||
add_elastic_to_minion
|
||||
add_logstash_to_minion
|
||||
add_sensor_to_minion
|
||||
}
|
||||
|
||||
function testConnection() {
|
||||
retry 15 3 "salt '$MINION_ID' test.ping" True
|
||||
local ret=$?
|
||||
if [[ $ret != 0 ]]; then
|
||||
echo "The Minion has been accepted but is not online. Try again later"
|
||||
echo "Deleting the key"
|
||||
deleteminion
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
if [[ "$OPERATION" = 'list' ]]; then
|
||||
listminions
|
||||
fi
|
||||
|
||||
if [[ "$OPERATION" = 'delete' ]]; then
|
||||
deleteminionfiles
|
||||
deleteminion
|
||||
fi
|
||||
|
||||
if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
|
||||
# Skip this if its setup
|
||||
if [ $OPERATION != 'setup' ]; then
|
||||
# Accept the salt key
|
||||
acceptminion
|
||||
# Test to see if the minion was accepted
|
||||
testConnection
|
||||
# Pull the info from the file to build what is needed
|
||||
getinstallinfo
|
||||
fi
|
||||
# Check to see if nodetype is set
|
||||
if [ -z $NODETYPE ]; then
|
||||
echo "No node type specified"
|
||||
exit 1
|
||||
fi
|
||||
create_minion_files
|
||||
add_host_to_minion
|
||||
add_sensoroni_to_minion
|
||||
create$NODETYPE
|
||||
echo "Minion file created for $MINION_ID"
|
||||
fi
|
||||
|
||||
if [[ "$OPERATION" = 'test' ]]; then
|
||||
testminion
|
||||
fi
|
||||
@@ -24,6 +24,7 @@ if [ $# -ge 1 ]; then
|
||||
|
||||
case $1 in
|
||||
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
|
||||
"elastic-fleet") docker stop so-elastic-fleet && docker rm so-elastic-fleet && salt-call state.apply elasticfleet queue=True;;
|
||||
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
|
||||
esac
|
||||
else
|
||||
|
||||
@@ -1,10 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
argstr=""
|
||||
for arg in "$@"; do
|
||||
argstr="${argstr} \"${arg}\""
|
||||
done
|
||||
|
||||
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"
|
||||
@@ -24,6 +24,7 @@ if [ $# -ge 1 ]; then
|
||||
case $1 in
|
||||
"all") salt-call state.highstate queue=True;;
|
||||
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
|
||||
"elastic-fleet") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply elasticfleet queue=True; fi ;;
|
||||
*) if docker ps | grep -E -q "^so-$1$"; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
|
||||
esac
|
||||
else
|
||||
|
||||
@@ -170,7 +170,8 @@ def main():
|
||||
if "-h" in options or "--help" in options or "-?" in options:
|
||||
showUsage(options, None)
|
||||
|
||||
if os.environ["USER"] != "root":
|
||||
proc = subprocess.run(['id', '-u'], stdout=subprocess.PIPE, encoding="utf-8")
|
||||
if proc.stdout.strip() != "0":
|
||||
fail("This program must be run as root")
|
||||
|
||||
console = Console()
|
||||
|
||||
@@ -61,7 +61,7 @@ if [ -f "$pillar_file" ]; then
|
||||
|
||||
reboot;
|
||||
else
|
||||
echo "There was an issue applying the workstation state. Please review the log above or at /opt/so/logs/salt/minion."
|
||||
echo "There was an issue applying the workstation state. Please review the log above or at /opt/so/log/salt/minion."
|
||||
fi
|
||||
else # workstation is already added
|
||||
echo "The workstation pillar already exists in $pillar_file."
|
||||
salt/curator/config.sls (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from "curator/map.jinja" import CURATORMERGED %}
|
||||
|
||||
# Create the group
|
||||
curatorgroup:
|
||||
group.present:
|
||||
- name: curator
|
||||
- gid: 934
|
||||
|
||||
# Add user
|
||||
curator:
|
||||
user.present:
|
||||
- uid: 934
|
||||
- gid: 934
|
||||
- home: /opt/so/conf/curator
|
||||
- createhome: False
|
||||
|
||||
# Create the log directory
|
||||
curlogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/curator
|
||||
- user: 934
|
||||
- group: 939
|
||||
|
||||
curactiondir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/curator/action
|
||||
- user: 934
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
actionconfs:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/curator/action
|
||||
- source: salt://curator/files/action
|
||||
- user: 934
|
||||
- group: 939
|
||||
- template: jinja
|
||||
- defaults:
|
||||
CURATORMERGED: {{ CURATORMERGED.elasticsearch.index_settings }}
|
||||
|
||||
curconf:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/curator/curator.yml
|
||||
- source: salt://curator/files/curator.yml
|
||||
- user: 934
|
||||
- group: 939
|
||||
- mode: 660
|
||||
- template: jinja
|
||||
- show_changes: False
|
||||
|
||||
curator_sbin:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://curator/tools/sbin
|
||||
- user: 934
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
|
||||
curator_sbin_jinja:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://curator/tools/sbin_jinja
|
||||
- user: 934
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
- template: jinja
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,98 +1,100 @@
|
||||
elasticsearch:
|
||||
index_settings:
|
||||
logs-import-so:
|
||||
close: 73000
|
||||
delete: 73001
|
||||
logs-strelka-so:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-suricata-so:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-syslog-so:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-zeek-so:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-metricbeat-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-osquerybeat-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-fleet_server-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-filebeat-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-auth-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-application-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-security-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-system-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-syslog-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-windows-powershell-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-windows-sysmon_operational-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-beats:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-elasticsearch:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-firewall:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-ids:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-import:
|
||||
close: 73000
|
||||
delete: 73001
|
||||
so-kratos:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-kibana:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-logstash:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-netflow:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-osquery:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-ossec:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-redis:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-strelka:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-syslog:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-zeek:
|
||||
close: 30
|
||||
delete: 365
|
||||
curator:
|
||||
enabled: False
|
||||
elasticsearch:
|
||||
index_settings:
|
||||
logs-import-so:
|
||||
close: 73000
|
||||
delete: 73001
|
||||
logs-strelka-so:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-suricata-so:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-syslog-so:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-zeek-so:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-metricbeat-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-osquerybeat-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-fleet_server-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-filebeat-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-elastic_agent-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-auth-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-application-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-security-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-system-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-system-syslog-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-windows-powershell-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
logs-windows-sysmon_operational-default:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-beats:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-elasticsearch:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-firewall:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-ids:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-import:
|
||||
close: 73000
|
||||
delete: 73001
|
||||
so-kratos:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-kibana:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-logstash:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-netflow:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-osquery:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-ossec:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-redis:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-strelka:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-syslog:
|
||||
close: 30
|
||||
delete: 365
|
||||
so-zeek:
|
||||
close: 30
|
||||
delete: 365
|
||||
|
||||
salt/curator/disabled.sls (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
include:
|
||||
- curator.sostatus
|
||||
|
||||
so-curator:
|
||||
docker_container.absent:
|
||||
- force: True
|
||||
|
||||
so-curator_so-status.disabled:
|
||||
file.comment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-curator$
|
||||
|
||||
so-curator-cluster-close:
|
||||
cron.absent:
|
||||
- identifier: so-curator-cluster-close
|
||||
|
||||
so-curator-cluster-delete:
|
||||
cron.absent:
|
||||
- identifier: so-curator-cluster-delete
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
salt/curator/enabled.sls (new file, 88 lines)
@@ -0,0 +1,88 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
|
||||
include:
|
||||
- curator.config
|
||||
- curator.sostatus
|
||||
|
||||
so-curator:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-curator:{{ GLOBALS.so_version }}
|
||||
- start: True
|
||||
- hostname: curator
|
||||
- name: so-curator
|
||||
- user: curator
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-curator'].ip }}
|
||||
- interactive: True
|
||||
- tty: True
|
||||
- binds:
|
||||
- /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
|
||||
- /opt/so/conf/curator/action/:/etc/curator/action:ro
|
||||
- /opt/so/log/curator:/var/log/curator:rw
|
||||
{% if DOCKER.containers['so-curator'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-curator'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-curator'].extra_hosts %}
|
||||
- extra_hosts:
|
||||
{% for XTRAHOST in DOCKER.containers['so-curator'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-curator'].extra_env %}
|
||||
- environment:
|
||||
{% for XTRAENV in DOCKER.containers['so-curator'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- require:
|
||||
- file: actionconfs
|
||||
- file: curconf
|
||||
- file: curlogdir
|
||||
- watch:
|
||||
- file: curconf
|
||||
|
||||
delete_so-curator_so-status.disabled:
|
||||
file.uncomment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-curator$
|
||||
|
||||
so-curator-cluster-close:
|
||||
cron.present:
|
||||
- name: /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
|
||||
- identifier: so-curator-cluster-close
|
||||
- user: root
|
||||
- minute: '2'
|
||||
- hour: '*/1'
|
||||
- daymonth: '*'
|
||||
- month: '*'
|
||||
- dayweek: '*'
|
||||
|
||||
so-curator-cluster-delete:
|
||||
cron.present:
|
||||
- name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-cluster-delete.log 2>&1
|
||||
- identifier: so-curator-cluster-delete
|
||||
- user: root
|
||||
- minute: '*/5'
|
||||
- hour: '*'
|
||||
- daymonth: '*'
|
||||
- month: '*'
|
||||
- dayweek: '*'
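# For reference (a sketch of the crontab entries these two cron states should render for root):
#   2 */1 * * * /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
#   */5 * * * * /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-cluster-delete.log 2>&1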
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,85 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICDEFAULTS %}
|
||||
{%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%}
|
||||
{%- set RETENTION = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) -%}
|
||||
|
||||
LOG="/opt/so/log/curator/so-curator-cluster-delete.log"
|
||||
LOG_SIZE_LIMIT=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ RETENTION.retention_pct}})
|
||||
|
||||
overlimit() {
|
||||
[[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt "${LOG_SIZE_LIMIT}" ]]
|
||||
}
|
||||
|
||||
closedindices() {
|
||||
# If we can't query Elasticsearch, then immediately return false.
|
||||
/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close > /dev/null 2>&1
|
||||
[ $? -eq 1 ] && return false
|
||||
# First, get the list of closed indices using _cat/indices?h=index,status | grep close | awk '{print $1}'.
|
||||
# Next, filter out any so-case indices.
|
||||
# Finally, use grep's -q option to return true if there are any remaining logstash-, so-, or .ds-logs- indices.
|
||||
/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -q -E "(logstash-|so-|.ds-logs-)"
|
||||
}
|
||||
|
||||
# Check for 2 conditions:
|
||||
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
|
||||
# 2. Are there any closed indices that we can delete?
|
||||
# If both conditions are true, keep on looping until one of the conditions is false.
|
||||
|
||||
while overlimit && closedindices; do
|
||||
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep close | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
|
||||
# We iterate through the closed indices
|
||||
for CLOSED_INDEX in ${CLOSED_INDICES}; do
|
||||
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
|
||||
# To do so, we need to identify to which data stream this index is associated
|
||||
# We extract the data stream name using the pattern below
|
||||
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
|
||||
DATASTREAM=$(echo "${CLOSED_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
|
||||
# We look up the data stream, and determine the write index
|
||||
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
|
||||
# We make sure we are not trying to delete a write index
|
||||
if [ "${CLOSED_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
|
||||
# This should not be a write index, so we should be allowed to delete it
|
||||
/usr/sbin/so-elasticsearch-query ${CLOSED_INDEX} -XDELETE
|
||||
# Finally, write a log entry that says we deleted it.
|
||||
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${CLOSED_INDEX} deleted ..." >> ${LOG}
|
||||
fi
|
||||
if ! overlimit; then
|
||||
exit
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
while overlimit; do
|
||||
|
||||
# We need to determine the oldest open index.
|
||||
# First, get the list of open indices using _cat/indices?h=index,status | grep open | awk '{print $1}'.
|
||||
# Next, filter out any so-case indices and only select the remaining logstash-, so-, or .ds-logs- indices.
|
||||
# Then, sort by date by telling sort to use hyphen as delimiter and sort on the third field.
|
||||
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep open | awk '{print $1}' | grep -v "so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
|
||||
# We iterate through the open indices
|
||||
for OPEN_INDEX in ${OPEN_INDICES}; do
|
||||
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
|
||||
# To do so, we need to identify to which data stream this index is associated
|
||||
# We extract the data stream name using the pattern below
|
||||
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
|
||||
DATASTREAM=$(echo "${OPEN_INDEX}" | grep -oE "$DATASTREAM_PATTERN")
|
||||
# We look up the data stream, and determine the write index
|
||||
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
|
||||
# We make sure we are not trying to delete a write index
|
||||
if [ "${OPEN_INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
|
||||
# This should not be a write index, so we should be allowed to delete it
|
||||
/usr/sbin/so-elasticsearch-query ${OPEN_INDEX} -XDELETE
|
||||
# Finally, write a log entry that says we deleted it.
|
||||
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT} GB) - Index ${OPEN_INDEX} deleted ..." >> ${LOG}
|
||||
fi
|
||||
if ! overlimit; then
|
||||
exit
|
||||
fi
|
||||
done
|
||||
done
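The write-index guard used in both loops above can be exercised on its own. Below is a minimal sketch, assuming /usr/sbin/so-elasticsearch-query and jq are available and that the index name embeds its data stream in the logs-<module>-<namespace> form; the helper function name is mine, not part of the repo:

#!/bin/bash
# Sketch: report whether a backing index is its data stream's current write index.
check_write_index() {
  local index="$1"
  local datastream
  datastream=$(echo "${index}" | grep -oE "logs-[a-zA-Z_.]+-[a-zA-Z_.]+") || return 2
  local write_index
  write_index=$(/usr/sbin/so-elasticsearch-query _data_stream/${datastream} | jq -r '.data_streams[0].indices[-1].index_name')
  if [ "${index}" = "${write_index}" ]; then
    echo "${index} is the current write index for ${datastream} - do not delete"
  else
    echo "${index} is not the write index for ${datastream} - eligible for deletion"
  fi
}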
|
||||
@@ -4,9 +4,9 @@
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% if grains['role'] in ['so-searchnode', 'so-heavynode'] %}
|
||||
{% if GLOBALS.role in ['so-searchnode', 'so-heavynode'] %}
|
||||
{%- set elasticsearch = GLOBALS.node_ip -%}
|
||||
{% elif grains['role'] in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %}
|
||||
{% elif GLOBALS.role in ['so-eval', 'so-managersearch', 'so-standalone', 'so-manager'] %}
|
||||
{%- set elasticsearch = GLOBALS.manager_ip -%}
|
||||
{%- endif %}
|
||||
{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
|
||||
@@ -30,10 +30,8 @@ elasticsearch:
|
||||
id:
|
||||
api_key:
|
||||
master_only: False
|
||||
{%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %}
|
||||
username: "{{ ES_USER }}"
|
||||
password: "{{ ES_PASS }}"
|
||||
{%- endif %}
|
||||
|
||||
logging:
|
||||
loglevel: INFO
|
||||
|
||||
@@ -3,153 +3,11 @@
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from "curator/map.jinja" import CURATOROPTIONS %}
|
||||
{% from "curator/map.jinja" import CURATORMERGED %}
|
||||
{% set REMOVECURATORCRON = False %}
|
||||
|
||||
# Curator
|
||||
# Create the group
|
||||
curatorgroup:
|
||||
group.present:
|
||||
- name: curator
|
||||
- gid: 934
|
||||
|
||||
# Add user
|
||||
curator:
|
||||
user.present:
|
||||
- uid: 934
|
||||
- gid: 934
|
||||
- home: /opt/so/conf/curator
|
||||
- createhome: False
|
||||
|
||||
# Create the log directory
|
||||
curlogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/curator
|
||||
- user: 934
|
||||
- group: 939
|
||||
|
||||
curactiondir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/curator/action
|
||||
- user: 934
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
actionconfs:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/curator/action
|
||||
- source: salt://curator/files/action
|
||||
- user: 934
|
||||
- group: 939
|
||||
- template: jinja
|
||||
- defaults:
|
||||
CURATORMERGED: {{ CURATORMERGED }}
|
||||
|
||||
curconf:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/curator/curator.yml
|
||||
- source: salt://curator/files/curator.yml
|
||||
- user: 934
|
||||
- group: 939
|
||||
- mode: 660
|
||||
- template: jinja
|
||||
- show_changes: False
|
||||
|
||||
curclusterclose:
|
||||
file.managed:
|
||||
- name: /usr/sbin/so-curator-cluster-close
|
||||
- source: salt://curator/files/bin/so-curator-cluster-close
|
||||
- user: 934
|
||||
- group: 939
|
||||
- mode: 755
|
||||
- template: jinja
|
||||
|
||||
curclusterdelete:
|
||||
file.managed:
|
||||
- name: /usr/sbin/so-curator-cluster-delete
|
||||
- source: salt://curator/files/bin/so-curator-cluster-delete
|
||||
- user: 934
|
||||
- group: 939
|
||||
- mode: 755
|
||||
|
||||
curclusterdeletedelete:
|
||||
file.managed:
|
||||
- name: /usr/sbin/so-curator-cluster-delete-delete
|
||||
- source: salt://curator/files/bin/so-curator-cluster-delete-delete
|
||||
- user: 934
|
||||
- group: 939
|
||||
- mode: 755
|
||||
- template: jinja
|
||||
|
||||
so-curator:
|
||||
docker_container.{{ CURATOROPTIONS.status }}:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-curator:{{ GLOBALS.so_version }}
|
||||
- start: {{ CURATOROPTIONS.start }}
|
||||
- hostname: curator
|
||||
- name: so-curator
|
||||
- user: curator
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-curator'].ip }}
|
||||
- interactive: True
|
||||
- tty: True
|
||||
- binds:
|
||||
- /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
|
||||
- /opt/so/conf/curator/action/:/etc/curator/action:ro
|
||||
- /opt/so/log/curator:/var/log/curator:rw
|
||||
- require:
|
||||
- file: actionconfs
|
||||
- file: curconf
|
||||
- file: curlogdir
|
||||
- watch:
|
||||
- file: curconf
|
||||
|
||||
append_so-curator_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-curator
|
||||
- unless: grep -q so-curator /opt/so/conf/so-status/so-status.conf
|
||||
{% if not CURATOROPTIONS.start %}
|
||||
so-curator_so-status.disabled:
|
||||
file.comment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-curator$
|
||||
{% else %}
|
||||
delete_so-curator_so-status.disabled:
|
||||
file.uncomment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-curator$
|
||||
{% endif %}
|
||||
|
||||
so-curatorclusterclose:
|
||||
cron.present:
|
||||
- name: /usr/sbin/so-curator-cluster-close > /opt/so/log/curator/cron-close.log 2>&1
|
||||
- user: root
|
||||
- minute: '2'
|
||||
- hour: '*/1'
|
||||
- daymonth: '*'
|
||||
- month: '*'
|
||||
- dayweek: '*'
|
||||
|
||||
so-curatorclusterdeletecron:
|
||||
cron.present:
|
||||
- name: /usr/sbin/so-curator-cluster-delete > /opt/so/log/curator/cron-cluster-delete.log 2>&1
|
||||
- user: root
|
||||
- minute: '*/5'
|
||||
- hour: '*'
|
||||
- daymonth: '*'
|
||||
- month: '*'
|
||||
- dayweek: '*'
|
||||
{% from 'curator/map.jinja' import CURATORMERGED %}
|
||||
|
||||
include:
|
||||
{% if CURATORMERGED.enabled %}
|
||||
- curator.enabled
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
- curator.disabled
|
||||
{% endif %}
|
||||
|
||||
@@ -1,18 +1,7 @@
|
||||
{% set CURATOROPTIONS = {} %}
|
||||
{% set ENABLED = salt['pillar.get']('curator:enabled', True) %}
|
||||
{% do CURATOROPTIONS.update({'manage_sostatus': True}) %}
|
||||
|
||||
# don't start the docker container if curator is disabled via pillar
|
||||
{% if not ENABLED %}
|
||||
{% do CURATOROPTIONS.update({'start': False}) %}
|
||||
{% do CURATOROPTIONS.update({'status': 'absent'}) %}
|
||||
{% if (TRUECLUSTER and grains.id.split('_')|last == 'searchnode') or (not TRUECLUSTER and grains.id.split('_')|last == 'manager') %}
|
||||
{% do CURATOROPTIONS.update({'manage_sostatus': False}) %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
{% do CURATOROPTIONS.update({'start': True}) %}
|
||||
{% do CURATOROPTIONS.update({'status': 'running'}) %}
|
||||
{% endif %}
|
||||
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
Elastic License 2.0. #}
|
||||
|
||||
{% import_yaml 'curator/defaults.yaml' as CURATORDEFAULTS %}
|
||||
{% set CURATORMERGED = salt['pillar.get']('elasticsearch:index_settings', CURATORDEFAULTS.elasticsearch.index_settings, merge=true) %}
|
||||
{% set CURATORMERGED = salt['pillar.get']('curator', CURATORDEFAULTS.curator, merge=true) %}
|
||||
|
||||
108
salt/curator/soc_curator.yaml
Normal file
@@ -0,0 +1,108 @@
|
||||
curator:
|
||||
enabled:
|
||||
description: You can enable or disable Curator.
|
||||
helpLink: curator.html
|
||||
elasticsearch:
|
||||
index_settings:
|
||||
logs-import-so:
|
||||
close: &close
|
||||
description: Age, in days, when Curator closes the index.
|
||||
helpLink: curator.html
|
||||
forcedType: int
|
||||
delete: &delete
|
||||
description: Age, in days, when Curator deletes the index.
|
||||
helpLink: curator.html
|
||||
forcedType: int
|
||||
logs-strelka-so:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-suricata-so:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-syslog-so:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-zeek-so:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-elastic_agent-metricbeat-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-elastic_agent-osquerybeat-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-elastic_agent-fleet_server-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-elastic_agent-filebeat-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-elastic_agent-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-system-auth-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-system-application-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-system-security-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-system-system-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-system-syslog-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-windows-powershell-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
logs-windows-sysmon_operational-default:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-beats:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-elasticsearch:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-firewall:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-ids:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-import:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-kratos:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-kibana:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-logstash:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-netflow:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-osquery:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-ossec:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-redis:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-strelka:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-syslog:
|
||||
close: *close
|
||||
delete: *delete
|
||||
so-zeek:
|
||||
close: *close
|
||||
delete: *delete
|
||||
21
salt/curator/sostatus.sls
Normal file
@@ -0,0 +1,21 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
append_so-curator_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-curator
|
||||
- unless: grep -q so-curator /opt/so/conf/so-status/so-status.conf
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
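The unless guard above is what keeps the append idempotent across highstate runs, and the enabled/disabled states toggle the same line with file.comment/file.uncomment. A rough shell equivalent, for illustration only (the sed expressions are mine, not from the repo):

CONF=/opt/so/conf/so-status/so-status.conf
# Append the service only if it is not already listed (mirrors the 'unless: grep -q' guard).
grep -q so-curator "$CONF" || echo so-curator >> "$CONF"
# Disable: comment the exact line, roughly what file.comment does.
sed -i 's/^so-curator$/#so-curator/' "$CONF"
# Re-enable: uncomment it again, roughly what file.uncomment does.
sed -i 's/^#so-curator$/so-curator/' "$CONF"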
|
||||
0
salt/common/tools/sbin/so-curator-restart → salt/curator/tools/sbin/so-curator-restart
Executable file → Normal file
0
salt/common/tools/sbin/so-curator-start → salt/curator/tools/sbin/so-curator-start
Executable file → Normal file
0
salt/common/tools/sbin/so-curator-stop → salt/curator/tools/sbin/so-curator-stop
Executable file → Normal file
67
salt/curator/tools/sbin_jinja/so-curator-cluster-delete-delete
Executable file
@@ -0,0 +1,67 @@
|
||||
#!/bin/bash
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICDEFAULTS %}
|
||||
{%- set ELASTICSEARCH_HOST = GLOBALS.node_ip -%}
|
||||
{%- set RETENTION = salt['pillar.get']('elasticsearch:retention', ELASTICDEFAULTS.elasticsearch.retention, merge=true) -%}
|
||||
|
||||
LOG="/opt/so/log/curator/so-curator-cluster-delete.log"
|
||||
ALERT_LOG="/opt/so/log/curator/alert.log"
|
||||
LOG_SIZE_LIMIT_GB=$(/usr/sbin/so-elasticsearch-cluster-space-total {{ RETENTION.retention_pct}})
|
||||
LOG_SIZE_LIMIT=$(( "$LOG_SIZE_LIMIT_GB" * 1000 * 1000 * 1000 ))
|
||||
ITERATION=0
|
||||
MAX_ITERATIONS=10
|
||||
|
||||
overlimit() {
|
||||
[[ $(/usr/sbin/so-elasticsearch-cluster-space-used) -gt ${LOG_SIZE_LIMIT} ]]
|
||||
}
|
||||
|
||||
###########################
|
||||
# Check for 2 conditions: #
|
||||
###########################
|
||||
# 1. Check if Elasticsearch indices are using more disk space than LOG_SIZE_LIMIT
|
||||
# 2. Check if the maximum number of iterations - MAX_ITERATIONS - has been exceeded. If so, exit.
|
||||
# Closed indices will be deleted first. If we are able to bring disk space under LOG_SIZE_LIMIT, or the number of iterations has exceeded the maximum allowed number of iterations, we will break out of the loop.
|
||||
|
||||
while overlimit && [[ $ITERATION -lt $MAX_ITERATIONS ]]; do
|
||||
|
||||
# If we can't query Elasticsearch, log the failure and exit immediately.
|
||||
/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status > /dev/null 2>&1
|
||||
[ $? -eq 1 ] && echo "$(date) - Could not query Elasticsearch." >> ${LOG} && exit
|
||||
|
||||
# We iterate through the closed and open indices
|
||||
CLOSED_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'close$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
|
||||
OPEN_INDICES=$(/usr/sbin/so-elasticsearch-query _cat/indices?h=index,status | grep 'open$' | awk '{print $1}' | grep -vE "playbook|so-case" | grep -E "(logstash-|so-|.ds-logs-)" | sort -t- -k3)
|
||||
|
||||
for INDEX in ${CLOSED_INDICES} ${OPEN_INDICES}; do
|
||||
# Now that we've sorted the indices from oldest to newest, we need to check each index to see if it is assigned as the current write index for a data stream
|
||||
# To do so, we need to identify which data stream this index is associated with
|
||||
# We extract the data stream name using the pattern below
|
||||
DATASTREAM_PATTERN="logs-[a-zA-Z_.]+-[a-zA-Z_.]+"
|
||||
DATASTREAM=$(echo "${INDEX}" | grep -oE "$DATASTREAM_PATTERN")
|
||||
# We look up the data stream and count its backing indices. If there is only one backing index, it is the current write index, so we leave it (and the data stream) alone
|
||||
BACKING_INDICES=$(/usr/sbin/so-elasticsearch-query _data_stream/${DATASTREAM} | jq -r '.data_streams[0].indices | length')
|
||||
if [ "$BACKING_INDICES" -gt 1 ]; then
|
||||
CURRENT_WRITE_INDEX=$(/usr/sbin/so-elasticsearch-query _data_stream/$DATASTREAM | jq -r .data_streams[0].indices[-1].index_name)
|
||||
# We make sure we are not trying to delete a write index
|
||||
if [ "${INDEX}" != "${CURRENT_WRITE_INDEX}" ]; then
|
||||
# This should not be a write index, so we should be allowed to delete it
|
||||
printf "\n$(date) - Used disk space exceeds LOG_SIZE_LIMIT (${LOG_SIZE_LIMIT_GB} GB) - Deleting ${INDEX} index...\n" >> ${LOG}
|
||||
/usr/sbin/so-elasticsearch-query ${INDEX} -XDELETE >> ${LOG} 2>&1
|
||||
fi
|
||||
fi
|
||||
if ! overlimit ; then
|
||||
exit
|
||||
fi
|
||||
((ITERATION++))
|
||||
done
|
||||
if [[ $ITERATION -ge $MAX_ITERATIONS ]]; then
|
||||
alert_id=$(uuidgen)
|
||||
printf "\n$(date) -> Maximum iteration limit reached ($MAX_ITERATIONS). Unable to bring disk below threshold. Writing alert ($alert_id) to ${ALERT_LOG}\n" >> ${LOG}
|
||||
printf "\n$(date),$alert_id,Maximum iteration limit reached ($MAX_ITERATIONS). Unable to bring disk below threshold.\n" >> ${ALERT_LOG}
|
||||
fi
|
||||
done
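For reference, the disk check driving this loop reduces to integer math in bytes. A small sketch of that arithmetic, assuming the two helper scripts behave as used above (total allowed space in GB for a retention percentage, and used space in bytes); the retention value is an example, not a default:

# Sketch: derive the byte threshold and decide whether pruning is needed.
RETENTION_PCT=50                                  # example value only
LIMIT_GB=$(/usr/sbin/so-elasticsearch-cluster-space-total "$RETENTION_PCT")
LIMIT_BYTES=$(( LIMIT_GB * 1000 * 1000 * 1000 ))  # decimal GB to bytes, matching the script above
USED_BYTES=$(/usr/sbin/so-elasticsearch-cluster-space-used)
if [ "$USED_BYTES" -gt "$LIMIT_BYTES" ]; then
  echo "over limit by $(( USED_BYTES - LIMIT_BYTES )) bytes - pruning required"
else
  echo "within limit - nothing to delete"
fi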
|
||||
@@ -8,93 +8,193 @@ docker:
|
||||
final_octet: 20
|
||||
port_bindings:
|
||||
- 0.0.0.0:5000:5000
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-elastic-fleet':
|
||||
final_octet: 21
|
||||
port_bindings:
|
||||
- 0.0.0.0:8220:8220/tcp
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-elasticsearch':
|
||||
final_octet: 22
|
||||
port_bindings:
|
||||
- 0.0.0.0:9200:9200/tcp
|
||||
- 0.0.0.0:9300:9300/tcp
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-idstools':
|
||||
final_octet: 25
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-influxdb':
|
||||
final_octet: 26
|
||||
port_bindings:
|
||||
- 0.0.0.0:8086:8086
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-kibana':
|
||||
final_octet: 27
|
||||
port_bindings:
|
||||
- 0.0.0.0:5601:5601
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-kratos':
|
||||
final_octet: 28
|
||||
port_bindings:
|
||||
- 0.0.0.0:4433:4433
|
||||
- 0.0.0.0:4434:4434
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-logstash':
|
||||
final_octet: 29
|
||||
port_bindings:
|
||||
- 0.0.0.0:3765:3765
|
||||
- 0.0.0.0:5044:5044
|
||||
- 0.0.0.0:5055:5055
|
||||
- 0.0.0.0:5056:5056
|
||||
- 0.0.0.0:5644:5644
|
||||
- 0.0.0.0:6050:6050
|
||||
- 0.0.0.0:6051:6051
|
||||
- 0.0.0.0:6052:6052
|
||||
- 0.0.0.0:6053:6053
|
||||
- 0.0.0.0:9600:9600
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-mysql':
|
||||
final_octet: 30
|
||||
port_bindings:
|
||||
- 0.0.0.0:3306:3306
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-nginx':
|
||||
final_octet: 31
|
||||
port_bindings:
|
||||
- 80:80
|
||||
- 443:443
|
||||
- 8443:8443
|
||||
- 7788:7788
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-playbook':
|
||||
final_octet: 32
|
||||
port_bindings:
|
||||
- 0.0.0.0:3000:3000
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-redis':
|
||||
final_octet: 33
|
||||
port_bindings:
|
||||
- 0.0.0.0:6379:6379
|
||||
- 0.0.0.0:9696:9696
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-sensoroni':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-soc':
|
||||
final_octet: 34
|
||||
port_bindings:
|
||||
- 0.0.0.0:9822:9822
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-soctopus':
|
||||
final_octet: 35
|
||||
port_bindings:
|
||||
- 0.0.0.0:7000:7000
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-strelka-backend':
|
||||
final_octet: 36
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-strelka-filestream':
|
||||
final_octet: 37
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-strelka-frontend':
|
||||
final_octet: 38
|
||||
port_bindings:
|
||||
- 0.0.0.0:57314:57314
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-strelka-manager':
|
||||
final_octet: 39
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-strelka-gatekeeper':
|
||||
final_octet: 40
|
||||
port_bindings:
|
||||
- 0.0.0.0:6381:6379
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-strelka-coordinator':
|
||||
final_octet: 41
|
||||
port_bindings:
|
||||
- 0.0.0.0:6380:6379
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-elastalert':
|
||||
final_octet: 42
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-curator':
|
||||
final_octet: 43
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-elastic-fleet-package-registry':
|
||||
final_octet: 44
|
||||
port_bindings:
|
||||
- 0.0.0.0:8080:8080/tcp
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-idh':
|
||||
final_octet: 45
|
||||
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-telegraf':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-steno':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-suricata':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-zeek':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
@@ -26,10 +26,10 @@ dockerheldpackages:
|
||||
dockerheldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
- containerd.io: 1.6.18-3.1.el9
|
||||
- docker-ce: 23.0.1-1.el9
|
||||
- docker-ce-cli: 23.0.1-1.el9
|
||||
- docker-ce-rootless-extras: 23.0.1-1.el9
|
||||
- containerd.io: 1.6.20-3.1.el9
|
||||
- docker-ce: 23.0.5-1.el9
|
||||
- docker-ce-cli: 23.0.5-1.el9
|
||||
- docker-ce-rootless-extras: 23.0.5-1.el9
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
{% endif %}
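A quick way to confirm that the version bump above actually landed on a node is to query the package database directly; this is only an illustrative check, and the versionlock listing assumes the dnf versionlock plugin backs the hold:

# Sketch: confirm the held Docker packages are at the pinned versions.
for pkg in containerd.io docker-ce docker-ce-cli docker-ce-rootless-extras; do
  rpm -q "$pkg"
done
# The hold should show up as a versionlock entry (assumption: versionlock plugin in use).
dnf versionlock list 2>/dev/null | grep -E 'docker|containerd' || true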
|
||||
|
||||
@@ -28,6 +28,24 @@ docker:
|
||||
helpLink: docker.html
|
||||
advanced: True
|
||||
multiline: True
|
||||
custom_bind_mounts:
|
||||
description: List of custom local volume bindings.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
extra_hosts:
|
||||
description: List of additional host entries for the container.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
extra_env:
|
||||
description: List of additional ENV entries for the container.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
so-dockerregistry: *dockerOptions
|
||||
so-elastalert: *dockerOptions
|
||||
so-elastic-fleet-package-registry: *dockerOptions
|
||||
|
||||
103
salt/elastalert/config.sls
Normal file
@@ -0,0 +1,103 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
{% from 'elastalert/map.jinja' import ELASTALERTMERGED %}
|
||||
|
||||
# Create the group
|
||||
elastagroup:
|
||||
group.present:
|
||||
- name: elastalert
|
||||
- gid: 933
|
||||
|
||||
# Add user
|
||||
elastalert:
|
||||
user.present:
|
||||
- uid: 933
|
||||
- gid: 933
|
||||
- home: /opt/so/conf/elastalert
|
||||
- createhome: False
|
||||
|
||||
elastalogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/elastalert
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastalert_sbin:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://elastalert/tools/sbin
|
||||
- user: 933
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
|
||||
#elastalert_sbin_jinja:
|
||||
# file.recurse:
|
||||
# - name: /usr/sbin
|
||||
# - source: salt://elastalert/tools/sbin_jinja
|
||||
# - user: 933
|
||||
# - group: 939
|
||||
# - file_mode: 755
|
||||
# - template: jinja
|
||||
|
||||
elastarules:
|
||||
file.directory:
|
||||
- name: /opt/so/rules/elastalert
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastaconfdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastalert
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastasomodulesdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastalert/modules/so
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastacustmodulesdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastalert/modules/custom
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastasomodulesync:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/elastalert/modules/so
|
||||
- source: salt://elastalert/files/modules/so
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastaconf:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elastalert/elastalert_config.yaml
|
||||
- source: salt://elastalert/files/elastalert_config.yaml.jinja
|
||||
- context:
|
||||
elastalert_config: {{ ELASTALERTMERGED.config }}
|
||||
- user: 933
|
||||
- group: 933
|
||||
- mode: 660
|
||||
- template: jinja
|
||||
- show_changes: False
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,4 +1,5 @@
|
||||
elastalert:
|
||||
enabled: False
|
||||
config:
|
||||
rules_folder: /opt/elastalert/rules/
|
||||
scan_subdirectories: true
|
||||
@@ -12,9 +13,10 @@ elastalert:
|
||||
es_port: 9200
|
||||
es_conn_timeout: 55
|
||||
max_query_size: 5000
|
||||
eql: true
|
||||
use_ssl: true
|
||||
verify_certs: false
|
||||
writeback_index: elastalert_status
|
||||
writeback_index: elastalert
|
||||
alert_time_limit:
|
||||
days: 2
|
||||
index_settings:
|
||||
|
||||
27
salt/elastalert/disabled.sls
Normal file
@@ -0,0 +1,27 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
include:
|
||||
- elastalert.sostatus
|
||||
|
||||
so-elastalert:
|
||||
docker_container.absent:
|
||||
- force: True
|
||||
|
||||
so-elastalert_so-status.disabled:
|
||||
file.comment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-elastalert$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,11 +0,0 @@
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% import_yaml 'elastalert/defaults.yaml' as ELASTALERT %}
|
||||
{% set elastalert_pillar = salt['pillar.get']('elastalert:config', {}) %}
|
||||
|
||||
|
||||
{% do ELASTALERT.elastalert.config.update({'es_host': GLOBALS.manager}) %}
|
||||
{% do ELASTALERT.elastalert.config.update({'es_username': pillar.elasticsearch.auth.users.so_elastic_user.user}) %}
|
||||
{% do ELASTALERT.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}
|
||||
|
||||
{% do salt['defaults.merge'](ELASTALERT.elastalert.config, elastalert_pillar, in_place=True) %}
|
||||
|
||||
74
salt/elastalert/enabled.sls
Normal file
@@ -0,0 +1,74 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
|
||||
include:
|
||||
- elastalert.config
|
||||
- elastalert.sostatus
|
||||
|
||||
wait_for_elasticsearch:
|
||||
cmd.run:
|
||||
- name: so-elasticsearch-wait
|
||||
|
||||
so-elastalert:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastalert:{{ GLOBALS.so_version }}
|
||||
- hostname: elastalert
|
||||
- name: so-elastalert
|
||||
- user: so-elastalert
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastalert'].ip }}
|
||||
- detach: True
|
||||
- binds:
|
||||
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
|
||||
- /opt/so/log/elastalert:/var/log/elastalert:rw
|
||||
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
|
||||
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
|
||||
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
{% if DOCKER.containers['so-elastalert'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-elastalert'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-elastalert'].extra_env %}
|
||||
- environment:
|
||||
{% for XTRAENV in DOCKER.containers['so-elastalert'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- require:
|
||||
- cmd: wait_for_elasticsearch
|
||||
- file: elastarules
|
||||
- file: elastalogdir
|
||||
- file: elastacustmodulesdir
|
||||
- file: elastaconf
|
||||
- watch:
|
||||
- file: elastaconf
|
||||
- onlyif:
|
||||
- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
|
||||
|
||||
delete_so-elastalert_so-status.disabled:
|
||||
file.uncomment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-elastalert$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
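The onlyif guard above gates the container on the Elasticsearch major version. Run by hand, the same check is a one-liner; this sketch assumes so-elasticsearch-query and jq are on the path:

# Sketch: only proceed when the cluster reports Elasticsearch 8.x.
if so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8; then
  echo "Elasticsearch 8.x detected - so-elastalert can start"
else
  echo "Elasticsearch is not 8.x yet - skipping" >&2
fi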
|
||||
@@ -31,8 +31,8 @@ class PlaybookESAlerter(Alerter):
|
||||
creds = (self.rule['es_username'], self.rule['es_password'])
|
||||
|
||||
payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
|
||||
url = f"https://{self.rule['es_host']}:{self.rule['es_port']}/so-playbook-alerts-{today}/_doc/"
|
||||
url = f"{self.rule['es_hosts']}/so-playbook-alerts-{today}/_doc/"
|
||||
requests.post(url, data=json.dumps(payload), headers=headers, verify=False, auth=creds)
|
||||
|
||||
def get_info(self):
|
||||
return {'type': 'PlaybookESAlerter'}
|
||||
return {'type': 'PlaybookESAlerter'}
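The change above swaps the hard-coded host and port for the es_hosts URL when the alerter indexes a document. From the shell, the equivalent request looks roughly like this; the URL, credentials, date format, and body are placeholders rather than values from the codebase:

# Sketch: index a playbook alert document the way the alerter does (placeholder values throughout).
ES_HOSTS="https://manager.example:9200"
TODAY=$(date +%Y.%m.%d)
curl -sk -u "so_elastic:changeme" \
  -H "Content-Type: application/json" \
  -X POST "${ES_HOSTS}/so-playbook-alerts-${TODAY}/_doc/" \
  -d '{"rule":{"name":"example play"},"event":{"severity":3},"@timestamp":"2023-01-01T00:00:00Z"}'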
|
||||
|
||||
@@ -1,124 +1,13 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||
# this file except in compliance with the Elastic License 2.0.
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls in allowed_states %}
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'elastalert/elastalert_config.map.jinja' import ELASTALERT as elastalert_config with context %}
|
||||
|
||||
# Create the group
|
||||
elastagroup:
|
||||
group.present:
|
||||
- name: elastalert
|
||||
- gid: 933
|
||||
|
||||
# Add user
|
||||
elastalert:
|
||||
user.present:
|
||||
- uid: 933
|
||||
- gid: 933
|
||||
- home: /opt/so/conf/elastalert
|
||||
- createhome: False
|
||||
|
||||
elastalogdir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/elastalert
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastarules:
|
||||
file.directory:
|
||||
- name: /opt/so/rules/elastalert
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastaconfdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastalert
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastasomodulesdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastalert/modules/so
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastacustmodulesdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastalert/modules/custom
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastasomodulesync:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/elastalert/modules/so
|
||||
- source: salt://elastalert/files/modules/so
|
||||
- user: 933
|
||||
- group: 933
|
||||
- makedirs: True
|
||||
|
||||
elastaconf:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elastalert/elastalert_config.yaml
|
||||
- source: salt://elastalert/files/elastalert_config.yaml.jinja
|
||||
- context:
|
||||
elastalert_config: {{ elastalert_config.elastalert.config }}
|
||||
- user: 933
|
||||
- group: 933
|
||||
- mode: 660
|
||||
- template: jinja
|
||||
- show_changes: False
|
||||
|
||||
wait_for_elasticsearch:
|
||||
cmd.run:
|
||||
- name: so-elasticsearch-wait
|
||||
|
||||
so-elastalert:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastalert:{{ GLOBALS.so_version }}
|
||||
- hostname: elastalert
|
||||
- name: so-elastalert
|
||||
- user: so-elastalert
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastalert'].ip }}
|
||||
- detach: True
|
||||
- binds:
|
||||
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
|
||||
- /opt/so/log/elastalert:/var/log/elastalert:rw
|
||||
- /opt/so/conf/elastalert/modules/:/opt/elastalert/modules/:ro
|
||||
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
- require:
|
||||
- cmd: wait_for_elasticsearch
|
||||
- file: elastarules
|
||||
- file: elastalogdir
|
||||
- file: elastacustmodulesdir
|
||||
- file: elastaconf
|
||||
- watch:
|
||||
- file: elastaconf
|
||||
- onlyif:
|
||||
- "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
|
||||
|
||||
|
||||
append_so-elastalert_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-elastalert
|
||||
{% from 'elastalert/map.jinja' import ELASTALERTMERGED %}
|
||||
|
||||
include:
|
||||
{% if ELASTALERTMERGED.enabled %}
|
||||
- elastalert.enabled
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
- elastalert.disabled
|
||||
{% endif %}
|
||||
|
||||
15
salt/elastalert/map.jinja
Normal file
@@ -0,0 +1,15 @@
|
||||
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
Elastic License 2.0. #}
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% import_yaml 'elastalert/defaults.yaml' as ELASTALERTDEFAULTS %}
|
||||
{% set elastalert_pillar = salt['pillar.get']('elastalert:config', {}) %}
|
||||
|
||||
|
||||
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_hosts': 'https://' + GLOBALS.manager + ':' + ELASTALERTDEFAULTS.elastalert.config.es_port|string}) %}
|
||||
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_username': pillar.elasticsearch.auth.users.so_elastic_user.user}) %}
|
||||
{% do ELASTALERTDEFAULTS.elastalert.config.update({'es_password': pillar.elasticsearch.auth.users.so_elastic_user.pass}) %}
|
||||
|
||||
{% set ELASTALERTMERGED = salt['pillar.get']('elastalert', ELASTALERTDEFAULTS.elastalert, merge=True) %}
|
||||
@@ -1,4 +1,7 @@
|
||||
elastalert:
|
||||
enabled:
|
||||
description: You can enable or disable Elastalert.
|
||||
helpLink: elastalert.html
|
||||
config:
|
||||
disable_rules_on_error:
|
||||
description: Disable rules on failure.
|
||||
|
||||
21
salt/elastalert/sostatus.sls
Normal file
@@ -0,0 +1,21 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
append_so-elastalert_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-elastalert
|
||||
- unless: grep -q so-elastalert /opt/so/conf/so-status/so-status.conf
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
29
salt/elastic-fleet-package-registry/config.sls
Normal file
@@ -0,0 +1,29 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
# Add Group
|
||||
elasticsagentprgroup:
|
||||
group.present:
|
||||
- name: elastic-agent-pr
|
||||
- gid: 948
|
||||
|
||||
# Add user
|
||||
elastic-agent-pr:
|
||||
user.present:
|
||||
- uid: 948
|
||||
- gid: 948
|
||||
- home: /opt/so/conf/elastic-fleet-pr
|
||||
- createhome: False
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
2
salt/elastic-fleet-package-registry/defaults.yaml
Normal file
@@ -0,0 +1,2 @@
|
||||
elastic_fleet_package_registry:
|
||||
enabled: False
|
||||
27
salt/elastic-fleet-package-registry/disabled.sls
Normal file
@@ -0,0 +1,27 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
include:
|
||||
- elastic-fleet-package-registry.sostatus
|
||||
|
||||
so-elastic-fleet-package-registry:
|
||||
docker_container.absent:
|
||||
- force: True
|
||||
|
||||
so-elastic-fleet-package-registry_so-status.disabled:
|
||||
file.comment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-elastic-fleet-package-registry$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
59
salt/elastic-fleet-package-registry/enabled.sls
Normal file
@@ -0,0 +1,59 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
|
||||
include:
|
||||
- elastic-fleet-package-registry.config
|
||||
- elastic-fleet-package-registry.sostatus
|
||||
|
||||
so-elastic-fleet-package-registry:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-fleet-package-registry:{{ GLOBALS.so_version }}
|
||||
- name: so-elastic-fleet-package-registry
|
||||
- hostname: Fleet-package-reg-{{ GLOBALS.hostname }}
|
||||
- detach: True
|
||||
- user: 948
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
|
||||
{% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-elastic-fleet-package-registry'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
{% if DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
|
||||
- binds:
|
||||
{% for BIND in DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
|
||||
- environment:
|
||||
{% for XTRAENV in DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
delete_so-elastic-fleet-package-registry_so-status.disabled:
|
||||
file.uncomment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-elastic-fleet-package-registry$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -1,52 +1,13 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||
# this file except in compliance with the Elastic License 2.0.
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
# Add Group
|
||||
elasticsagentprgroup:
|
||||
group.present:
|
||||
- name: elastic-agent-pr
|
||||
- gid: 948
|
||||
|
||||
|
||||
# Add user
|
||||
elastic-agent-pr:
|
||||
user.present:
|
||||
- uid: 948
|
||||
- gid: 948
|
||||
- home: /opt/so/conf/elastic-fleet-pr
|
||||
- createhome: False
|
||||
|
||||
so-elastic-fleet-package-registry:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-fleet-package-registry:{{ GLOBALS.so_version }}
|
||||
- name: so-elastic-fleet-package-registry
|
||||
- hostname: Fleet-package-reg-{{ GLOBALS.hostname }}
|
||||
- detach: True
|
||||
- user: 948
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-elastic-fleet-package-registry'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
|
||||
append_so-elastic-fleet-package-registry_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-elastic-fleet-package-registry
|
||||
{% from 'elastic-fleet-package-registry/map.jinja' import ELASTICFLEETPACKAGEREGISTRYMERGED %}
|
||||
|
||||
include:
|
||||
{% if ELASTICFLEETPACKAGEREGISTRYMERGED.enabled %}
|
||||
- elastic-fleet-package-registry.enabled
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
- elastic-fleet-package-registry.disabled
|
||||
{% endif %}
|
||||
|
||||
7
salt/elastic-fleet-package-registry/map.jinja
Normal file
@@ -0,0 +1,7 @@
|
||||
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
Elastic License 2.0. #}
|
||||
|
||||
{% import_yaml 'elastic-fleet-package-registry/defaults.yaml' as ELASTICFLEETPACKAGEREGISTRYDEFAULTS %}
|
||||
{% set ELASTICFLEETPACKAGEREGISTRYMERGED = salt['pillar.get']('elastic_fleet_package_registry', ELASTICFLEETPACKAGEREGISTRYDEFAULTS.elastic_fleet_package_registry, merge=True) %}
|
||||
@@ -0,0 +1,4 @@
|
||||
elastic_fleet_package_registry:
|
||||
enabled:
|
||||
description: You can enable or disable Elastic Fleet Package Registry.
|
||||
advanced: True
|
||||
21
salt/elastic-fleet-package-registry/sostatus.sls
Normal file
@@ -0,0 +1,21 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
append_so-elastic-fleet-package-registry_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-elastic-fleet-package-registry
|
||||
- unless: grep -q so-elastic-fleet-package-registry /opt/so/conf/so-status/so-status.conf
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
11
salt/elasticfleet/artifact_registry.sls
Normal file
@@ -0,0 +1,11 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||
# this file except in compliance with the Elastic License 2.0.
|
||||
|
||||
|
||||
fleetartifactdir:
|
||||
file.directory:
|
||||
- name: /nsm/elastic-fleet/artifacts
|
||||
- user: 947
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
91
salt/elasticfleet/config.sls
Normal file
@@ -0,0 +1,91 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
# Add EA Group
|
||||
elasticsagentgroup:
|
||||
group.present:
|
||||
- name: elastic-agent
|
||||
- gid: 947
|
||||
|
||||
# Add EA user
|
||||
elastic-agent:
|
||||
user.present:
|
||||
- uid: 947
|
||||
- gid: 947
|
||||
- home: /opt/so/conf/elastic-fleet
|
||||
- createhome: False
|
||||
|
||||
elasticfleet_sbin:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://elasticfleet/tools/sbin
|
||||
- user: 947
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
|
||||
elasticfleet_sbin_jinja:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://elasticfleet/tools/sbin_jinja
|
||||
- user: 947
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
- template: jinja
|
||||
|
||||
eaconfdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastic-fleet
|
||||
- user: 947
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
eastatedir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastic-fleet/state
|
||||
- user: 947
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
{% if GLOBALS.role != "so-fleet" %}
|
||||
eaintegrationsdir:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/elastic-fleet/integrations
|
||||
- user: 947
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
eadynamicintegration:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/elastic-fleet/integrations
|
||||
- source: salt://elasticfleet/files/integrations-dynamic
|
||||
- user: 947
|
||||
- group: 939
|
||||
- template: jinja
|
||||
|
||||
eaintegration:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/elastic-fleet/integrations
|
||||
- source: salt://elasticfleet/files/integrations
|
||||
- user: 947
|
||||
- group: 939
|
||||
|
||||
ea-integrations-load:
|
||||
file.absent:
|
||||
- name: /opt/so/state/eaintegrations.txt
|
||||
- onchanges:
|
||||
- file: eaintegration
|
||||
- file: eadynamicintegration
|
||||
{% endif %}
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
23
salt/elasticfleet/defaults.yaml
Normal file
@@ -0,0 +1,23 @@
|
||||
elasticfleet:
|
||||
enabled: False
|
||||
config:
|
||||
server:
|
||||
endpoints_enrollment: ''
|
||||
es_token: ''
|
||||
grid_enrollment: ''
|
||||
url: ''
|
||||
logging:
|
||||
zeek:
|
||||
excluded:
|
||||
- broker
|
||||
- capture_loss
|
||||
- ecat_arp_info
|
||||
- known_hosts
|
||||
- known_services
|
||||
- loaded_scripts
|
||||
- ntp
|
||||
- packet_filter
|
||||
- reporter
|
||||
- stats
|
||||
- stderr
|
||||
- stdout
|
||||
27
salt/elasticfleet/disabled.sls
Normal file
@@ -0,0 +1,27 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
|
||||
include:
|
||||
- elasticfleet.sostatus
|
||||
|
||||
so-elastic-fleet:
|
||||
docker_container.absent:
|
||||
- force: True
|
||||
|
||||
so-elastic-fleet_so-status.disabled:
|
||||
file.comment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-elastic-fleet$
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
83
salt/elasticfleet/enabled.sls
Normal file
@@ -0,0 +1,83 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{# This value is generated during node install and stored in minion pillar #}
|
||||
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}
|
||||
|
||||
include:
|
||||
- elasticfleet.config
|
||||
- elasticfleet.sostatus
|
||||
|
||||
{% if SERVICETOKEN != '' %}
|
||||
so-elastic-fleet:
|
||||
docker_container.running:
|
||||
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent:{{ GLOBALS.so_version }}
|
||||
- name: so-elastic-fleet
|
||||
- hostname: FleetServer-{{ GLOBALS.hostname }}
|
||||
- detach: True
|
||||
- user: 947
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet'].ip }}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
|
||||
{% if DOCKER.containers['so-elastic-fleet'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-elastic-fleet'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
- binds:
|
||||
- /etc/pki:/etc/pki:ro
|
||||
#- /opt/so/conf/elastic-fleet/state:/usr/share/elastic-agent/state:rw
|
||||
{% if DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- environment:
|
||||
- FLEET_SERVER_ENABLE=true
|
||||
- FLEET_URL=https://{{ GLOBALS.node_ip }}:8220
|
||||
- FLEET_SERVER_ELASTICSEARCH_HOST=https://{{ GLOBALS.manager }}:9200
|
||||
- FLEET_SERVER_SERVICE_TOKEN={{ SERVICETOKEN }}
|
||||
- FLEET_SERVER_POLICY_ID=FleetServer_{{ GLOBALS.hostname }}
|
||||
- FLEET_SERVER_ELASTICSEARCH_CA=/etc/pki/tls/certs/intca.crt
|
||||
- FLEET_SERVER_CERT=/etc/pki/elasticfleet.crt
|
||||
- FLEET_SERVER_CERT_KEY=/etc/pki/elasticfleet.key
|
||||
- FLEET_CA=/etc/pki/tls/certs/intca.crt
|
||||
{% if DOCKER.containers['so-elastic-fleet'].extra_env %}
|
||||
{% for XTRAENV in DOCKER.containers['so-elastic-fleet'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.role != "so-fleet" %}
|
||||
so-elastic-fleet-integrations:
|
||||
cmd.run:
|
||||
- name: /usr/sbin/so-elastic-fleet-integration-policy-load
|
||||
{% endif %}
|
||||
|
||||
delete_so-elastic-fleet_so-status.disabled:
|
||||
file.uncomment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- regex: ^so-elastic-fleet$
|
||||
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
@@ -0,0 +1,32 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{%- raw -%}
{
  "package": {
    "name": "log",
    "version": ""
  },
  "name": "import-zeek-logs",
  "namespace": "so",
  "description": "Zeek Import logs",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.log": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/import/*/zeek/logs/*.log"
            ],
            "data_stream.dataset": "import",
            "tags": [],
            "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
            "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
          }
        }
      }
    }
  }
}
{%- endraw -%}
@@ -0,0 +1,33 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{%- raw -%}
{
  "package": {
    "name": "log",
    "version": ""
  },
  "id": "zeek-logs",
  "name": "zeek-logs",
  "namespace": "so",
  "description": "Zeek logs",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.log": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/zeek/logs/current/*.log"
            ],
            "data_stream.dataset": "zeek",
            "tags": [],
            "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
            "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
          }
        }
      }
    }
  }
}
{%- endraw -%}
@@ -0,0 +1,28 @@
{
  "name": "elastic-defend-endpoints",
  "namespace": "default",
  "description": "",
  "package": {
    "name": "endpoint",
    "title": "Elastic Defend",
    "version": ""
  },
  "enabled": true,
  "policy_id": "endpoints-initial",
  "vars": {},
  "inputs": [{
    "type": "endpoint",
    "enabled": true,
    "streams": [],
    "config": {
      "integration_config": {
        "value": {
          "type": "endpoint",
          "endpointConfig": {
            "preset": "DataCollection"
          }
        }
      }
    }
  }]
}
@@ -0,0 +1,20 @@
{
  "package": {
    "name": "osquery_manager",
    "version": ""
  },
  "name": "osquery-endpoints",
  "namespace": "default",
  "policy_id": "endpoints-initial",
  "inputs": {
    "osquery_manager-osquery": {
      "enabled": true,
      "streams": {
        "osquery_manager.result": {
          "enabled": true,
          "vars": {}
        }
      }
    }
  }
}
@@ -0,0 +1,57 @@
{
  "policy_id": "endpoints-initial",
  "package": {
    "name": "system",
    "version": ""
  },
  "name": "system-endpoints",
  "namespace": "default",
  "inputs": {
    "system-logfile": {
      "enabled": true,
      "streams": {
        "system.auth": {
          "enabled": true,
          "vars": {
            "paths": [
              "/var/log/auth.log*",
              "/var/log/secure*"
            ]
          }
        },
        "system.syslog": {
          "enabled": true,
          "vars": {
            "paths": [
              "/var/log/messages*",
              "/var/log/syslog*"
            ]
          }
        }
      }
    },
    "system-winlog": {
      "enabled": true,
      "vars": {
        "preserve_original_event": false
      },
      "streams": {
        "system.application": {
          "enabled": true,
          "vars": {
            "tags": []
          }
        },
        "system.security": {
          "enabled": true,
          "vars": {
            "tags": []
          }
        }
      }
    },
    "system-system/metrics": {
      "enabled": false
    }
  }
}
@@ -0,0 +1,67 @@
{
  "policy_id": "endpoints-initial",
  "package": {
    "name": "windows",
    "version": ""
  },
  "name": "windows-endpoints",
  "description": "",
  "namespace": "default",
  "inputs": {
    "windows-winlog": {
      "enabled": true,
      "streams": {
        "windows.forwarded": {
          "enabled": true,
          "vars": {
            "tags": [
              "forwarded"
            ],
            "preserve_original_event": false
          }
        },
        "windows.powershell": {
          "enabled": true,
          "vars": {
            "tags": [],
            "preserve_original_event": false
          }
        },
        "windows.powershell_operational": {
          "enabled": true,
          "vars": {
            "tags": [],
            "preserve_original_event": false
          }
        },
        "windows.sysmon_operational": {
          "enabled": true,
          "vars": {
            "tags": [],
            "preserve_original_event": false
          }
        }
      }
    },
    "windows-windows/metrics": {
      "enabled": false,
      "streams": {
        "windows.perfmon": {
          "enabled": false,
          "vars": {
            "perfmon.group_measurements_by_instance": false,
            "perfmon.ignore_non_existent_counters": false,
            "perfmon.queries": "- object: 'Process'\n instance: [\"*\"]\n counters:\n - name: '% Processor Time'\n field: cpu_perc\n format: \"float\"\n - name: \"Working Set\"\n",
            "period": "10s"
          }
        },
        "windows.service": {
          "enabled": false,
          "vars": {
            "period": "60s"
          }
        }
      }
    }
  }
}
@@ -0,0 +1,106 @@
{
  "package": {
    "name": "elasticsearch",
    "version": ""
  },
  "name": "elasticsearch-logs",
  "namespace": "default",
  "description": "Elasticsearch Logs",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "elasticsearch-logfile": {
      "enabled": true,
      "streams": {
        "elasticsearch.audit": {
          "enabled": false,
          "vars": {
            "paths": [
              "/var/log/elasticsearch/*_audit.json"
            ]
          }
        },
        "elasticsearch.deprecation": {
          "enabled": false,
          "vars": {
            "paths": [
              "/var/log/elasticsearch/*_deprecation.json"
            ]
          }
        },
        "elasticsearch.gc": {
          "enabled": false,
          "vars": {
            "paths": [
              "/var/log/elasticsearch/gc.log.[0-9]*",
              "/var/log/elasticsearch/gc.log"
            ]
          }
        },
        "elasticsearch.server": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/elasticsearch/*.log"
            ]
          }
        },
        "elasticsearch.slowlog": {
          "enabled": false,
          "vars": {
            "paths": [
              "/var/log/elasticsearch/*_index_search_slowlog.json",
              "/var/log/elasticsearch/*_index_indexing_slowlog.json"
            ]
          }
        }
      }
    },
    "elasticsearch-elasticsearch/metrics": {
      "enabled": false,
      "vars": {
        "hosts": [
          "http://localhost:9200"
        ],
        "scope": "node"
      },
      "streams": {
        "elasticsearch.stack_monitoring.ccr": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.cluster_stats": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.enrich": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.index": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.index_recovery": {
          "enabled": false,
          "vars": {
            "active.only": true
          }
        },
        "elasticsearch.stack_monitoring.index_summary": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.ml_job": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.node": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.node_stats": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.pending_tasks": {
          "enabled": false
        },
        "elasticsearch.stack_monitoring.shard": {
          "enabled": false
        }
      }
    }
  }
}
@@ -0,0 +1,29 @@
{
  "package": {
    "name": "log",
    "version": ""
  },
  "name": "idh-logs",
  "namespace": "so",
  "description": "IDH integration",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.log": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/idh/opencanary.log"
            ],
            "data_stream.dataset": "idh",
            "tags": [],
            "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
            "custom": "pipeline: common"
          }
        }
      }
    }
  }
}
@@ -0,0 +1,29 @@
{
  "package": {
    "name": "log",
    "version": ""
  },
  "name": "import-evtx-logs",
  "namespace": "so",
  "description": "Import Windows EVTX logs",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.log": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/import/*/evtx/data.json"
            ],
            "data_stream.dataset": "import",
            "tags": [],
            "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: windows_eventlog\n imported: true",
            "custom": "pipeline: import.wel"
          }
        }
      }
    }
  }
}
@@ -0,0 +1,29 @@
{
  "package": {
    "name": "log",
    "version": ""
  },
  "name": "import-suricata-logs",
  "namespace": "so",
  "description": "Import Suricata logs",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.log": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/import/*/suricata/eve*.json"
            ],
            "data_stream.dataset": "import",
            "tags": [],
            "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
            "custom": "pipeline: suricata.common"
          }
        }
      }
    }
  }
}
@@ -0,0 +1,29 @@
{
  "package": {
    "name": "log",
    "version": ""
  },
  "name": "kratos-logs",
  "namespace": "so",
  "description": "Kratos logs",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.log": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/kratos/kratos.log"
            ],
            "data_stream.dataset": "kratos",
            "tags": ["so-kratos"],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
            "custom": "pipeline: kratos"
          }
        }
      }
    }
  }
}
@@ -0,0 +1,20 @@
{
  "package": {
    "name": "osquery_manager",
    "version": ""
  },
  "name": "osquery-grid-nodes",
  "namespace": "default",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "osquery_manager-osquery": {
      "enabled": true,
      "streams": {
        "osquery_manager.result": {
          "enabled": true,
          "vars": {}
        }
      }
    }
  }
}
@@ -0,0 +1,76 @@
{
  "package": {
    "name": "redis",
    "version": ""
  },
  "name": "redis-logs",
  "namespace": "default",
  "description": "Redis logs",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "redis-logfile": {
      "enabled": true,
      "streams": {
        "redis.log": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/redis/redis.log"
            ],
            "tags": [
              "redis-log"
            ],
            "preserve_original_event": false
          }
        }
      }
    },
    "redis-redis": {
      "enabled": false,
      "streams": {
        "redis.slowlog": {
          "enabled": false,
          "vars": {
            "hosts": [
              "127.0.0.1:6379"
            ],
            "password": ""
          }
        }
      }
    },
    "redis-redis/metrics": {
      "enabled": false,
      "vars": {
        "hosts": [
          "127.0.0.1:6379"
        ],
        "idle_timeout": "20s",
        "maxconn": 10,
        "network": "tcp",
        "password": ""
      },
      "streams": {
        "redis.info": {
          "enabled": false,
          "vars": {
            "period": "10s"
          }
        },
        "redis.key": {
          "enabled": false,
          "vars": {
            "key.patterns": "- limit: 20\n pattern: *\n",
            "period": "10s"
          }
        },
        "redis.keyspace": {
          "enabled": false,
          "vars": {
            "period": "10s"
          }
        }
      }
    }
  }
}
@@ -0,0 +1,29 @@
{
  "package": {
    "name": "log",
    "version": ""
  },
  "name": "soc-auth-sync-logs",
  "namespace": "so",
  "description": "Security Onion - Elastic Auth Sync - Logs",
  "policy_id": "so-grid-nodes",
  "inputs": {
    "logs-logfile": {
      "enabled": true,
      "streams": {
        "log.log": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/soc/sync.log"
            ],
            "data_stream.dataset": "soc",
            "tags": ["so-soc"],
            "processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
            "custom": "pipeline: common"
          }
        }
      }
    }
  }
}
Some files were not shown because too many files have changed in this diff.