mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2026-01-13 19:51:23 +01:00
Compare commits
845 Commits
2.4.130-20
...
2.4/playbo
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
422b4bc4c9 | ||
|
|
6cdd88808a | ||
|
|
c1d85493df | ||
|
|
e01d0f81ea | ||
|
|
376d0f3295 | ||
|
|
4418623f73 | ||
|
|
5166db1caa | ||
|
|
ff5ad586af | ||
|
|
4dae1afe0b | ||
|
|
456cad1ada | ||
|
|
562b7e54cb | ||
|
|
3c847bca8b | ||
|
|
ce2cc26224 | ||
|
|
f3c574679c | ||
|
|
5da3fed1ce | ||
|
|
e6bcf5db6b | ||
|
|
4d24c57903 | ||
|
|
0606c0a454 | ||
|
|
bb984e05e3 | ||
|
|
b35b0aaf2c | ||
|
|
62f04fa5dd | ||
|
|
d89df5f0dd | ||
|
|
f0c1922600 | ||
|
|
ab2cdd18ed | ||
|
|
889bb7ddf4 | ||
|
|
a959f90d0b | ||
|
|
a54cd004d6 | ||
|
|
5100032fbd | ||
|
|
0f235baa7e | ||
|
|
e5660b8c8e | ||
|
|
588a1b86d1 | ||
|
|
46f0afa24b | ||
|
|
a7651b2734 | ||
|
|
890f76e45c | ||
|
|
e6eecc93c8 | ||
|
|
8dc0f8d20e | ||
|
|
fbdc0c4705 | ||
|
|
d1a2b57aa2 | ||
|
|
f5ec1d4b7c | ||
|
|
0aa556e375 | ||
|
|
d9e86c15bc | ||
|
|
4107fa006f | ||
|
|
29980ea958 | ||
|
|
8f36d2ec00 | ||
|
|
10511b8431 | ||
|
|
2535ae953d | ||
|
|
2f68cd7483 | ||
|
|
6655276410 | ||
|
|
9f7bcb0f7d | ||
|
|
aa43177d8c | ||
|
|
12959d114c | ||
|
|
855b489c4b | ||
|
|
673f9cb544 | ||
|
|
0a3ff47008 | ||
|
|
834e34128d | ||
|
|
73776f8d11 | ||
|
|
120e61e45c | ||
|
|
fc2d450de0 | ||
|
|
cea4eaf081 | ||
|
|
b1753f86f9 | ||
|
|
6323fbf46b | ||
|
|
ba601c39b3 | ||
|
|
ec27517bdd | ||
|
|
624ec3c93e | ||
|
|
f318a84c18 | ||
|
|
8cca58dba9 | ||
|
|
6c196ea61a | ||
|
|
207572f2f9 | ||
|
|
4afc986f48 | ||
|
|
ba5d140d4b | ||
|
|
348f9dcaec | ||
|
|
915b9e7bd7 | ||
|
|
dfec29d18e | ||
|
|
38ef4a6046 | ||
|
|
a007fa6505 | ||
|
|
1a32a0897c | ||
|
|
e26310d172 | ||
|
|
c7cdb0b466 | ||
|
|
df0b484b45 | ||
|
|
2181cddf49 | ||
|
|
a2b6968cef | ||
|
|
285fbc2783 | ||
|
|
94c5a1fd98 | ||
|
|
19362fe5e5 | ||
|
|
a7a81e9825 | ||
|
|
31484d1158 | ||
|
|
f51cd008f2 | ||
|
|
a5675a79fe | ||
|
|
1ea7b3c09f | ||
|
|
d9127a288f | ||
|
|
ebb78bc9bd | ||
|
|
e5920b6465 | ||
|
|
153a99a002 | ||
|
|
69a5e1e2f5 | ||
|
|
0858160be2 | ||
|
|
ccd79c814d | ||
|
|
a8a01b8191 | ||
|
|
ac2c044a94 | ||
|
|
e10d00d114 | ||
|
|
cbdd369a18 | ||
|
|
b2e7f58b3d | ||
|
|
a6600b8762 | ||
|
|
5479d49379 | ||
|
|
304985b61e | ||
|
|
d6c725299b | ||
|
|
d99857002d | ||
|
|
2a6c74917e | ||
|
|
9f0bd4bad3 | ||
|
|
924b06976c | ||
|
|
1357f19e48 | ||
|
|
c91e9ea4e0 | ||
|
|
c2c96dad6e | ||
|
|
1a08833e77 | ||
|
|
d16dfcf4e8 | ||
|
|
b79c7b0540 | ||
|
|
9f45792217 | ||
|
|
d3108c3549 | ||
|
|
7d883cb5e0 | ||
|
|
ebd81c1df9 | ||
|
|
418dbee9fa | ||
|
|
cccc3bf625 | ||
|
|
a3e0072631 | ||
|
|
220e485312 | ||
|
|
67f8fca043 | ||
|
|
0e0ab8384c | ||
|
|
58228f70ca | ||
|
|
7968de06b4 | ||
|
|
87fdd90f56 | ||
|
|
65e7e56fbe | ||
|
|
424fdff934 | ||
|
|
f72996d9d1 | ||
|
|
d77556c672 | ||
|
|
c412e9bad2 | ||
|
|
87a28e8ce7 | ||
|
|
9ca0c7d53a | ||
|
|
2e94e452ed | ||
|
|
6a0d40ee0d | ||
|
|
0cebcf4432 | ||
|
|
ed0e24fcaf | ||
|
|
24be2f869b | ||
|
|
f8058a4a3a | ||
|
|
d0ba6df2fc | ||
|
|
95bee91b12 | ||
|
|
751b5bd556 | ||
|
|
77273449c9 | ||
|
|
46e1f1bc5c | ||
|
|
884bec7465 | ||
|
|
8d3220f94b | ||
|
|
9cb42911dc | ||
|
|
a3cc6f025e | ||
|
|
6fae4a9974 | ||
|
|
f7a1a3a172 | ||
|
|
292e1ad782 | ||
|
|
af1fe86586 | ||
|
|
97100cdfdd | ||
|
|
5f60ef1541 | ||
|
|
c7e7a0a871 | ||
|
|
f09eff530e | ||
|
|
50b34a116a | ||
|
|
42874fb0d0 | ||
|
|
482847187c | ||
|
|
a19b99268d | ||
|
|
3c5a03d7b6 | ||
|
|
c1a5c2b2d1 | ||
|
|
baf0f7ba95 | ||
|
|
ee27965314 | ||
|
|
d02093295b | ||
|
|
6381444fdc | ||
|
|
01b313868d | ||
|
|
3859ebd69c | ||
|
|
9753e431e3 | ||
|
|
b307667ae2 | ||
|
|
5d7dcbbcee | ||
|
|
281b395053 | ||
|
|
3518f39d39 | ||
|
|
ae0ffc4977 | ||
|
|
bc2f716c99 | ||
|
|
9617da1791 | ||
|
|
2ba5d7d64b | ||
|
|
437b9016ca | ||
|
|
c5db0a7195 | ||
|
|
82894d88b6 | ||
|
|
4a4146f515 | ||
|
|
59a4d0129f | ||
|
|
5cf2149218 | ||
|
|
453c32df0d | ||
|
|
1df10b80b2 | ||
|
|
9d96a11753 | ||
|
|
e9e3252bb5 | ||
|
|
930c8147e7 | ||
|
|
378ecad94c | ||
|
|
02299a6742 | ||
|
|
15cbc626c4 | ||
|
|
8720a4540a | ||
|
|
7b5980bfe5 | ||
|
|
ebfb670f6a | ||
|
|
c98042fa80 | ||
|
|
70181e3e08 | ||
|
|
adb1e01c7a | ||
|
|
cdb7f0602c | ||
|
|
d52e817dd5 | ||
|
|
07305d8799 | ||
|
|
fbf5bafae7 | ||
|
|
d49cd3cb85 | ||
|
|
b60b9e7743 | ||
|
|
26fd8562c5 | ||
|
|
84b38daf62 | ||
|
|
a0f9d5dc61 | ||
|
|
e8c25d157f | ||
|
|
214f4f0f0c | ||
|
|
7ae0369a3b | ||
|
|
2e5682f11c | ||
|
|
2e7cb0e362 | ||
|
|
56748ea6e7 | ||
|
|
621f03994c | ||
|
|
ab8ad72920 | ||
|
|
3fc244ee85 | ||
|
|
4728b96c51 | ||
|
|
f303363a73 | ||
|
|
2a166af524 | ||
|
|
ab4d055fd1 | ||
|
|
af49a8e4ef | ||
|
|
669d219fdc | ||
|
|
442aecb9f4 | ||
|
|
beda0bc89c | ||
|
|
64fd6bf979 | ||
|
|
1955434416 | ||
|
|
ab6a083fa8 | ||
|
|
eabca5df18 | ||
|
|
5dac3ff2a6 | ||
|
|
93024738d3 | ||
|
|
05a368681a | ||
|
|
246161018c | ||
|
|
f27714890a | ||
|
|
47831eb300 | ||
|
|
0b1f2252ee | ||
|
|
3ce6b555f7 | ||
|
|
c29f11863e | ||
|
|
952403b696 | ||
|
|
b3eb06f53e | ||
|
|
5198d0cdf0 | ||
|
|
e61e2f04b3 | ||
|
|
1aa876f4eb | ||
|
|
a3fb2f13be | ||
|
|
9e77eae71e | ||
|
|
cd5de5cd05 | ||
|
|
98a67530f5 | ||
|
|
a037421809 | ||
|
|
6bb6c24641 | ||
|
|
617834a044 | ||
|
|
2c5c0e7830 | ||
|
|
81d2c52867 | ||
|
|
4f8bd16910 | ||
|
|
ab9d03bc2e | ||
|
|
10bf3e8fab | ||
|
|
f8108e93d5 | ||
|
|
3108556495 | ||
|
|
f97b2444e7 | ||
|
|
415f456661 | ||
|
|
e49b3fc260 | ||
|
|
9b125fbe53 | ||
|
|
10e3b32fed | ||
|
|
5386c07b66 | ||
|
|
7149d20b42 | ||
|
|
8a57b79b77 | ||
|
|
a4e8e7ea53 | ||
|
|
95ba327eb3 | ||
|
|
3056410fd1 | ||
|
|
bf8da60605 | ||
|
|
226f858866 | ||
|
|
317d7dea7d | ||
|
|
4e548ceb6e | ||
|
|
d846fe55e1 | ||
|
|
3b2942651e | ||
|
|
fa6f4100dd | ||
|
|
33e2d18aa7 | ||
|
|
a03764d956 | ||
|
|
3fb703cd22 | ||
|
|
f1cbe23f57 | ||
|
|
07a22a0b4b | ||
|
|
b9d813cef2 | ||
|
|
76ab0eac03 | ||
|
|
08a2ad2c40 | ||
|
|
47bbc9987e | ||
|
|
59628ec8b7 | ||
|
|
bef2fa9e8d | ||
|
|
d4f0cbcb67 | ||
|
|
9e96b12e94 | ||
|
|
42552810fb | ||
|
|
4bf2c931e9 | ||
|
|
beda6ac20d | ||
|
|
d8be6e42e1 | ||
|
|
4fb7fe9e45 | ||
|
|
6d7066c381 | ||
|
|
d003e1380f | ||
|
|
ef8badaef1 | ||
|
|
dea9c149d7 | ||
|
|
56c9fa3129 | ||
|
|
a86105294b | ||
|
|
33c23c30d3 | ||
|
|
fe76a79ebd | ||
|
|
5035ec2539 | ||
|
|
9f35b20664 | ||
|
|
b93c6c0270 | ||
|
|
e5dd403dd1 | ||
|
|
493359e5a2 | ||
|
|
b0f5218775 | ||
|
|
8fdc7049f9 | ||
|
|
d79d7e2ba1 | ||
|
|
596b3e2614 | ||
|
|
59f8544324 | ||
|
|
daaad3699c | ||
|
|
1e9f3a65a4 | ||
|
|
b2acf2f807 | ||
|
|
34e561f358 | ||
|
|
e5a07170b3 | ||
|
|
02dbbc5289 | ||
|
|
5e62d3ecb2 | ||
|
|
373ef9fe91 | ||
|
|
2f1e6fd625 | ||
|
|
6b8ef43cc1 | ||
|
|
7e746b87c5 | ||
|
|
2ad2a3110c | ||
|
|
bc24a6c574 | ||
|
|
b25bb0faf0 | ||
|
|
38c74b46b6 | ||
|
|
fbb6d8146a | ||
|
|
83ecc02589 | ||
|
|
21d9964827 | ||
|
|
f3b6d9febb | ||
|
|
b052a75e64 | ||
|
|
0602601655 | ||
|
|
480e248131 | ||
|
|
6fc7c930a6 | ||
|
|
31cd5b1365 | ||
|
|
19fb081fa0 | ||
|
|
d3b1a4f928 | ||
|
|
4729e194a0 | ||
|
|
ab6060c484 | ||
|
|
0b65021f75 | ||
|
|
bd4f2093db | ||
|
|
48dfcab9f0 | ||
|
|
849f8f13bc | ||
|
|
07359ad6ec | ||
|
|
1e2453eddf | ||
|
|
4c9773c68d | ||
|
|
4666670f4f | ||
|
|
0f71b45e0f | ||
|
|
92e9bd43ca | ||
|
|
a600c64229 | ||
|
|
121dec0180 | ||
|
|
b451c4c034 | ||
|
|
dbdbffa4b0 | ||
|
|
f360c6ecbc | ||
|
|
b9ea151846 | ||
|
|
b428573a0a | ||
|
|
350e1c9d91 | ||
|
|
a3b5db5945 | ||
|
|
3efe0eac13 | ||
|
|
aca54b4645 | ||
|
|
643afeeae7 | ||
|
|
d9fb79403b | ||
|
|
2ef89be67d | ||
|
|
43e994f2c2 | ||
|
|
ab89858d04 | ||
|
|
395c4e37ba | ||
|
|
3da2c7cabc | ||
|
|
832d66052e | ||
|
|
add538f6dd | ||
|
|
fc9107f129 | ||
|
|
d9790b04f6 | ||
|
|
88fa04b0f6 | ||
|
|
d240fca721 | ||
|
|
4d6171bde6 | ||
|
|
6238a5b3ed | ||
|
|
061600fa7a | ||
|
|
1b89cc6818 | ||
|
|
6e1e617124 | ||
|
|
7f8bf850a2 | ||
|
|
0277891392 | ||
|
|
08d99a3890 | ||
|
|
773606d876 | ||
|
|
bf38055a6c | ||
|
|
90b8d6b2f7 | ||
|
|
2d78fa1a41 | ||
|
|
45d541d4f2 | ||
|
|
b3c48674c5 | ||
|
|
8d42739030 | ||
|
|
27358137f2 | ||
|
|
a54b9ddbe4 | ||
|
|
58936b31d5 | ||
|
|
fcdacc3b0d | ||
|
|
40531dd919 | ||
|
|
05dfce62fb | ||
|
|
9df9cc2247 | ||
|
|
d3ee5ed7b8 | ||
|
|
502e1e1f1b | ||
|
|
e5b12ecdb9 | ||
|
|
be5e41227f | ||
|
|
08f208cd38 | ||
|
|
db08ac9022 | ||
|
|
ad5a27f991 | ||
|
|
07ec302267 | ||
|
|
112704e340 | ||
|
|
e6753440f8 | ||
|
|
18d899a7f9 | ||
|
|
b2650da057 | ||
|
|
31df0b5d7d | ||
|
|
a430a47a30 | ||
|
|
00f811ce31 | ||
|
|
ddd023c69a | ||
|
|
2911025c0c | ||
|
|
2e8ab648fd | ||
|
|
b753d40861 | ||
|
|
a32aac7111 | ||
|
|
2fff6232c1 | ||
|
|
f751c82e1c | ||
|
|
39f74fe547 | ||
|
|
11fb33fdeb | ||
|
|
58f4db95ea | ||
|
|
b55cb257b6 | ||
|
|
b0a8191f59 | ||
|
|
28aedcf50b | ||
|
|
6988f03ebc | ||
|
|
2948577b0e | ||
|
|
870a9ff80c | ||
|
|
689db57f5f | ||
|
|
2768722132 | ||
|
|
df103b3dca | ||
|
|
0542c77137 | ||
|
|
9022dc24fb | ||
|
|
78b7068638 | ||
|
|
70339b9a94 | ||
|
|
5c8460fd26 | ||
|
|
69e90e1e70 | ||
|
|
8c5ea19d3c | ||
|
|
82562f89f6 | ||
|
|
ede36b5ef8 | ||
|
|
fd00a4db85 | ||
|
|
510c7a0c19 | ||
|
|
2a7365c7d7 | ||
|
|
f7ca3e45ac | ||
|
|
0172272e1b | ||
|
|
776f574427 | ||
|
|
a0aafb7c51 | ||
|
|
09ec14acd8 | ||
|
|
61f8b251f0 | ||
|
|
75dd04c398 | ||
|
|
e2ef544bfc | ||
|
|
daad99a0b6 | ||
|
|
fdeee45d3f | ||
|
|
7fe9e2cbfd | ||
|
|
74d557a5e0 | ||
|
|
82f9043a14 | ||
|
|
a8cb18bb2e | ||
|
|
e1d31c895e | ||
|
|
e661c73583 | ||
|
|
42ba778740 | ||
|
|
204d53e4a7 | ||
|
|
d47a798645 | ||
|
|
9e0f13cce5 | ||
|
|
68ea229a1c | ||
|
|
1ecf2b29fc | ||
|
|
8c37a4454c | ||
|
|
ef436026d5 | ||
|
|
a595bc4b31 | ||
|
|
8a321e3f15 | ||
|
|
b4214f73f4 | ||
|
|
b9da7eb35b | ||
|
|
d6139d0f19 | ||
|
|
d2fe8da082 | ||
|
|
1931de2e52 | ||
|
|
d68a14d789 | ||
|
|
f988af52f6 | ||
|
|
fd02950864 | ||
|
|
a167e5e520 | ||
|
|
26d7ceebb2 | ||
|
|
382c3328df | ||
|
|
92d8985f3c | ||
|
|
c2d9523e09 | ||
|
|
c34914c8de | ||
|
|
d020bf5504 | ||
|
|
95d8e0f318 | ||
|
|
be4df48742 | ||
|
|
ba4df4c8b6 | ||
|
|
86eab6fda2 | ||
|
|
5d2bed950e | ||
|
|
e5c0f8a46c | ||
|
|
044d230158 | ||
|
|
5965459423 | ||
|
|
3a31d80a85 | ||
|
|
5a8e542f96 | ||
|
|
7a60afdd5a | ||
|
|
c3b3e0ab21 | ||
|
|
b918a5e256 | ||
|
|
1ddc653a52 | ||
|
|
85f5f75c84 | ||
|
|
3cb3281cd5 | ||
|
|
6246e25fbe | ||
|
|
b858543a60 | ||
|
|
5ecb483596 | ||
|
|
102ddaf262 | ||
|
|
151db2af30 | ||
|
|
e9a4668c63 | ||
|
|
5f45327372 | ||
|
|
ac8ac23522 | ||
|
|
b2bd8577b9 | ||
|
|
4df3070a1d | ||
|
|
142609ea67 | ||
|
|
46779513de | ||
|
|
e27a0d8f7a | ||
|
|
9e4c456eb9 | ||
|
|
400739736d | ||
|
|
196e0c1486 | ||
|
|
76d63bb2ad | ||
|
|
ed80c4e13b | ||
|
|
69c904548c | ||
|
|
272410ecae | ||
|
|
19514a969b | ||
|
|
77f88371b8 | ||
|
|
559190aee3 | ||
|
|
8c4cf0ba08 | ||
|
|
e17fea849a | ||
|
|
b2c09d6fd9 | ||
|
|
30c4acb828 | ||
|
|
4ec185a9c7 | ||
|
|
166e4e0ebc | ||
|
|
4b7478654f | ||
|
|
5bd84c4e30 | ||
|
|
f5a8e917a4 | ||
|
|
4e6c707067 | ||
|
|
c89adce3a1 | ||
|
|
af1bee4c68 | ||
|
|
e3c8d22cac | ||
|
|
285d73d526 | ||
|
|
0bcb6040c9 | ||
|
|
3f13f8deae | ||
|
|
13d96ae5af | ||
|
|
3b447b343f | ||
|
|
d0375d3c7e | ||
|
|
b607689993 | ||
|
|
8f1e528f1c | ||
|
|
2f8d8d2d96 | ||
|
|
366e39950a | ||
|
|
5fd7bf311d | ||
|
|
152fdaa7bb | ||
|
|
07ef3d632c | ||
|
|
7f5cde9a1c | ||
|
|
58df566c79 | ||
|
|
395b81ffc6 | ||
|
|
e3d5829b89 | ||
|
|
df31c349b0 | ||
|
|
759d5f76cd | ||
|
|
240484deea | ||
|
|
ceabb673e0 | ||
|
|
f1070992a8 | ||
|
|
c0f9c344bb | ||
|
|
21bb325157 | ||
|
|
00029e6f83 | ||
|
|
9459bf8a27 | ||
|
|
96e99fc442 | ||
|
|
4b14bf90a3 | ||
|
|
2cb002668f | ||
|
|
c11a10638b | ||
|
|
6fe240de45 | ||
|
|
ecd7da540a | ||
|
|
2a43a6f37e | ||
|
|
4cdfb6e3eb | ||
|
|
1edd13523c | ||
|
|
4217e23272 | ||
|
|
f94c81a041 | ||
|
|
4c3518385b | ||
|
|
1429226667 | ||
|
|
888ab162bd | ||
|
|
5498673fc3 | ||
|
|
96c56297ce | ||
|
|
8ab38956d1 | ||
|
|
0f120f7500 | ||
|
|
f6a0e62853 | ||
|
|
cc0e91aa96 | ||
|
|
bf9f92b04e | ||
|
|
270958ddfc | ||
|
|
b99bb0b004 | ||
|
|
8f3664f26c | ||
|
|
445afca6ee | ||
|
|
3083e3bc63 | ||
|
|
9c455badb9 | ||
|
|
9e16c03d25 | ||
|
|
275489b8a3 | ||
|
|
cd6deae0a7 | ||
|
|
0b8a7f5b67 | ||
|
|
3c342bb90d | ||
|
|
ba10228fef | ||
|
|
71f146d1d9 | ||
|
|
b22fe5bd3d | ||
|
|
a60e55e5cd | ||
|
|
e7aa4428de | ||
|
|
64f71143dc | ||
|
|
72fd25dcaf | ||
|
|
eef4b82afb | ||
|
|
1d4d442554 | ||
|
|
02ad08035e | ||
|
|
335d8851e6 | ||
|
|
e4d2513609 | ||
|
|
7aad298720 | ||
|
|
22fae2e98d | ||
|
|
3850558be3 | ||
|
|
5b785d3ef8 | ||
|
|
8b874e46d0 | ||
|
|
4165b33995 | ||
|
|
3e10c95b7b | ||
|
|
1d058729e5 | ||
|
|
f9bf4e4130 | ||
|
|
056a29ea89 | ||
|
|
667e66bbef | ||
|
|
595ff8dce2 | ||
|
|
99aa383e01 | ||
|
|
5f116b3e43 | ||
|
|
bb8f0605e1 | ||
|
|
5836bc5bd1 | ||
|
|
55c815cae8 | ||
|
|
79388af645 | ||
|
|
d7e831fbeb | ||
|
|
8f40b66e3b | ||
|
|
0fe3038802 | ||
|
|
cd9b04e1bb | ||
|
|
0fbb6afee1 | ||
|
|
402e26fc19 | ||
|
|
b6e10b1de7 | ||
|
|
54f3a8cb91 | ||
|
|
1f98cef816 | ||
|
|
7a71a5369c | ||
|
|
964b631d58 | ||
|
|
dcb667b32d | ||
|
|
e61d37893a | ||
|
|
60bd960251 | ||
|
|
b974c6e8df | ||
|
|
7484495021 | ||
|
|
0952b7528f | ||
|
|
14c95a5fe0 | ||
|
|
d0bb86a24f | ||
|
|
749825af19 | ||
|
|
844283cc38 | ||
|
|
ae0bf1ccdf | ||
|
|
a0637fa25d | ||
|
|
d2a21c1e4c | ||
|
|
ed23340157 | ||
|
|
ef6dbf9e46 | ||
|
|
1236c8c1f2 | ||
|
|
51625e19ad | ||
|
|
760ff1e45b | ||
|
|
5b3fa17f81 | ||
|
|
053eadbb39 | ||
|
|
540b0de00c | ||
|
|
c30cbf9af0 | ||
|
|
41c0a91d77 | ||
|
|
6e1e5a2ee6 | ||
|
|
aa8fd647b6 | ||
|
|
8feae6ba11 | ||
|
|
028297cef8 | ||
|
|
19755d4077 | ||
|
|
cd655e6adb | ||
|
|
2be143d902 | ||
|
|
1b98f9f313 | ||
|
|
762ccdd222 | ||
|
|
277504fff6 | ||
|
|
3f3e7ea1e8 | ||
|
|
4d7fdd390c | ||
|
|
269919b980 | ||
|
|
05c93e3796 | ||
|
|
fe21a19c5c | ||
|
|
af6245f19d | ||
|
|
ad8f3dfde7 | ||
|
|
2dc977ddd8 | ||
|
|
28c7362cfa | ||
|
|
c93a5de460 | ||
|
|
44a5b3b1e5 | ||
|
|
d23b6958c1 | ||
|
|
60b1535018 | ||
|
|
758c6728f9 | ||
|
|
5234b21743 | ||
|
|
7d73f6cfd7 | ||
|
|
ae94722eda | ||
|
|
ae993c47c1 | ||
|
|
c784a6e440 | ||
|
|
c66cd3b2f3 | ||
|
|
f30938ed59 | ||
|
|
6c472dd383 | ||
|
|
2c5861a0c2 | ||
|
|
8047e196fe | ||
|
|
c6c979dc19 | ||
|
|
c8a1c8377a | ||
|
|
4e954c24f7 | ||
|
|
52839e2a7d | ||
|
|
1a9d5f151f | ||
|
|
d6f527881a | ||
|
|
5811b184be | ||
|
|
e0a3b51ca2 | ||
|
|
b5276a6a1d | ||
|
|
cc1b030c00 | ||
|
|
c896785480 | ||
|
|
0006948c29 | ||
|
|
6ac14f832e | ||
|
|
fd9a4966ec | ||
|
|
3246176c0a | ||
|
|
b68f561e6f | ||
|
|
8ffd4fc664 | ||
|
|
f46548ed88 | ||
|
|
0d335e3056 | ||
|
|
6ff701bd5c | ||
|
|
c34be5313d | ||
|
|
ec2fc0a5f2 | ||
|
|
ad54afe39a | ||
|
|
eb4cd75218 | ||
|
|
a84f5a1e32 | ||
|
|
e193347fb4 | ||
|
|
ad27c8674b | ||
|
|
5123a86062 | ||
|
|
010c205eec | ||
|
|
160c84ec1a | ||
|
|
924c0b63bd | ||
|
|
9b8dce0c77 | ||
|
|
7159678385 | ||
|
|
c8e232c598 | ||
|
|
a3013ff85b | ||
|
|
65c5abfa88 | ||
|
|
0114e36cfa | ||
|
|
5c56e0f498 | ||
|
|
61992ae787 | ||
|
|
08bbeedbd7 | ||
|
|
a5f2db8c80 | ||
|
|
8d1ce0460f | ||
|
|
3c85b48291 | ||
|
|
ea2e026c56 | ||
|
|
8b3f310212 | ||
|
|
87136e9e2b | ||
|
|
5a6a9d6ec2 | ||
|
|
d3b3a0eb8a | ||
|
|
91fc59cffc | ||
|
|
e32dbad0d0 | ||
|
|
b66aafd168 | ||
|
|
2cd0f69069 | ||
|
|
0177f641c8 | ||
|
|
b3969a6ce0 | ||
|
|
ab97d3b8b7 | ||
|
|
213df68d04 | ||
|
|
9db3cd901c | ||
|
|
64c9230423 | ||
|
|
17943ef0db | ||
|
|
8ed3f0b1cc | ||
|
|
7c50a5e17b | ||
|
|
c13c85bd2d | ||
|
|
ae01dc9639 | ||
|
|
a74ed0daf0 | ||
|
|
60387651d2 | ||
|
|
3a78be68d6 | ||
|
|
a896332db3 | ||
|
|
54eeb0e327 | ||
|
|
1f13554bd9 | ||
|
|
4cc3691489 | ||
|
|
24eadf2507 | ||
|
|
a274bfb744 | ||
|
|
2277c792b9 | ||
|
|
61f5614ac9 | ||
|
|
6367aed62a | ||
|
|
739f592061 | ||
|
|
116c2b73c1 | ||
|
|
58be7ae5db | ||
|
|
0e0fb885d2 | ||
|
|
e8546b82f8 | ||
|
|
837fbab96d | ||
|
|
cbd2d88000 | ||
|
|
01ac1cdcca | ||
|
|
161e8a6c21 | ||
|
|
2e3c1adc63 | ||
|
|
776afa4a36 | ||
|
|
3cac19d498 | ||
|
|
2ba8a87c9d | ||
|
|
d677dc51de | ||
|
|
ebbfcd169c | ||
|
|
574d2994d1 | ||
|
|
ecc5d64584 | ||
|
|
6888682f92 | ||
|
|
0197cdb33d | ||
|
|
3c59858f70 | ||
|
|
6f0161e9da | ||
|
|
f2bd735f51 | ||
|
|
7a8fd8c3e5 | ||
|
|
b24aa2f797 | ||
|
|
5e4f1fc279 | ||
|
|
e779d180f9 | ||
|
|
a84a32c075 | ||
|
|
5649986834 | ||
|
|
7eaa8d54dc | ||
|
|
61a1fbde6e | ||
|
|
a0a18973d8 | ||
|
|
efbf62f56a | ||
|
|
39391c8088 | ||
|
|
9ac5ef09ad | ||
|
|
3394588602 | ||
|
|
c64a05f2ff | ||
|
|
0c4426a55e | ||
|
|
feb700393e | ||
|
|
0476585370 | ||
|
|
dcc1738978 | ||
|
|
0b0ff62bc5 | ||
|
|
9f76371449 | ||
|
|
50bd8448cc | ||
|
|
0b326370bd | ||
|
|
d0963baad4 | ||
|
|
75e8c60fe2 | ||
|
|
e7ea27a1b3 | ||
|
|
aaa48f6a1a | ||
|
|
0766a5da91 | ||
|
|
267d1a27ac | ||
|
|
f5e6e49075 | ||
|
|
d44ce0a070 | ||
|
|
9ddccba780 | ||
|
|
301894f6e8 | ||
|
|
a425a7fda2 | ||
|
|
21c3835322 | ||
|
|
d110503639 | ||
|
|
64bf7eb363 | ||
|
|
205560cc95 | ||
|
|
7698243caf | ||
|
|
67f0934930 | ||
|
|
30e998edf7 | ||
|
|
2a35e45920 | ||
|
|
aa5de9f7bd | ||
|
|
f9eeb76518 | ||
|
|
957235a656 | ||
|
|
64a0c171f3 | ||
|
|
a28ac3bee6 | ||
|
|
3643303a51 | ||
|
|
81d407f0ff | ||
|
|
d29b0660f0 | ||
|
|
59b94177d6 | ||
|
|
9d2c5d54b0 | ||
|
|
a6f1a0245a | ||
|
|
fcf859ffed | ||
|
|
fe3f87e1fd | ||
|
|
5a24a7775e | ||
|
|
52e52f35f7 | ||
|
|
810be2c9d2 | ||
|
|
8e4777a5ff |
3
.github/.gitleaks.toml
vendored
3
.github/.gitleaks.toml
vendored
@@ -541,5 +541,6 @@ paths = [
|
||||
'''gitleaks.toml''',
|
||||
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
|
||||
'''(go.mod|go.sum)$''',
|
||||
'''salt/nginx/files/enterprise-attack.json'''
|
||||
'''salt/nginx/files/enterprise-attack.json''',
|
||||
'''(.*?)whl$'''
|
||||
]
|
||||
|
||||
7
.github/DISCUSSION_TEMPLATE/2-4.yml
vendored
7
.github/DISCUSSION_TEMPLATE/2-4.yml
vendored
@@ -25,6 +25,13 @@ body:
|
||||
- 2.4.111
|
||||
- 2.4.120
|
||||
- 2.4.130
|
||||
- 2.4.140
|
||||
- 2.4.141
|
||||
- 2.4.150
|
||||
- 2.4.160
|
||||
- 2.4.170
|
||||
- 2.4.180
|
||||
- 2.4.190
|
||||
- Other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
8
.github/workflows/pythontest.yml
vendored
8
.github/workflows/pythontest.yml
vendored
@@ -1,10 +1,6 @@
|
||||
name: python-test
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- "salt/sensoroni/files/analyzers/**"
|
||||
- "salt/manager/tools/sbin"
|
||||
pull_request:
|
||||
paths:
|
||||
- "salt/sensoroni/files/analyzers/**"
|
||||
@@ -17,7 +13,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.10"]
|
||||
python-version: ["3.13"]
|
||||
python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]
|
||||
|
||||
steps:
|
||||
@@ -36,4 +32,4 @@ jobs:
|
||||
flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
|
||||
- name: Test with pytest
|
||||
run: |
|
||||
pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini
|
||||
PYTHONPATH=${{ matrix.python-code-path }} pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,4 +1,3 @@
|
||||
|
||||
# Created by https://www.gitignore.io/api/macos,windows
|
||||
# Edit at https://www.gitignore.io/?templates=macos,windows
|
||||
|
||||
@@ -67,4 +66,4 @@ __pycache__
|
||||
|
||||
# Analyzer dev/test config files
|
||||
*_dev.yaml
|
||||
site-packages
|
||||
site-packages
|
||||
|
||||
@@ -1,17 +1,17 @@
|
||||
### 2.4.130-20250311 ISO image released on 2025/03/11
|
||||
### 2.4.180-20250916 ISO image released on 2025/09/17
|
||||
|
||||
|
||||
### Download and Verify
|
||||
|
||||
2.4.130-20250311 ISO image:
|
||||
https://download.securityonion.net/file/securityonion/securityonion-2.4.130-20250311.iso
|
||||
2.4.180-20250916 ISO image:
|
||||
https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso
|
||||
|
||||
MD5: 4641CA710570CCE18CD7D50653373DC0
|
||||
SHA1: 786EF73F7945FDD80126C9AE00BDD29E58743715
|
||||
SHA256: 48C7A042F20C46B8087BAE0F971696DADE9F9364D52F416718245C16E7CCB977
|
||||
MD5: DE93880E38DE4BE45D05A41E1745CB1F
|
||||
SHA1: AEA6948911E50A4A38E8729E0E965C565402E3FC
|
||||
SHA256: C9BD8CA071E43B048ABF9ED145B87935CB1D4BB839B2244A06FAD1BBA8EAC84A
|
||||
|
||||
Signature for ISO image:
|
||||
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.130-20250311.iso.sig
|
||||
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig
|
||||
|
||||
Signing key:
|
||||
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
|
||||
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
|
||||
|
||||
Download the signature file for the ISO:
|
||||
```
|
||||
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.130-20250311.iso.sig
|
||||
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.180-20250916.iso.sig
|
||||
```
|
||||
|
||||
Download the ISO image:
|
||||
```
|
||||
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.130-20250311.iso
|
||||
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.180-20250916.iso
|
||||
```
|
||||
|
||||
Verify the downloaded ISO image using the signature file:
|
||||
```
|
||||
gpg --verify securityonion-2.4.130-20250311.iso.sig securityonion-2.4.130-20250311.iso
|
||||
gpg --verify securityonion-2.4.180-20250916.iso.sig securityonion-2.4.180-20250916.iso
|
||||
```
|
||||
|
||||
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
|
||||
```
|
||||
gpg: Signature made Mon 10 Mar 2025 06:30:49 PM EDT using RSA key ID FE507013
|
||||
gpg: Signature made Tue 16 Sep 2025 06:30:19 PM EDT using RSA key ID FE507013
|
||||
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
|
||||
gpg: WARNING: This key is not certified with a trusted signature!
|
||||
gpg: There is no indication that the signature belongs to the owner.
|
||||
|
||||
34
pillar/hypervisor/nodes.sls
Normal file
34
pillar/hypervisor/nodes.sls
Normal file
@@ -0,0 +1,34 @@
|
||||
{% set node_types = {} %}
|
||||
{% for minionid, ip in salt.saltutil.runner(
|
||||
'mine.get',
|
||||
tgt='G@role:so-hypervisor or G@role:so-managerhype',
|
||||
fun='network.ip_addrs',
|
||||
tgt_type='compound') | dictsort()
|
||||
%}
|
||||
|
||||
# only add a node to the pillar if it returned an ip from the mine
|
||||
{% if ip | length > 0%}
|
||||
{% set hostname = minionid.split('_') | first %}
|
||||
{% set node_type = minionid.split('_') | last %}
|
||||
{% if node_type not in node_types.keys() %}
|
||||
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
|
||||
{% else %}
|
||||
{% if hostname not in node_types[node_type] %}
|
||||
{% do node_types[node_type].update({hostname: ip[0]}) %}
|
||||
{% else %}
|
||||
{% do node_types[node_type][hostname].update(ip[0]) %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
|
||||
hypervisor:
|
||||
nodes:
|
||||
{% for node_type, values in node_types.items() %}
|
||||
{{node_type}}:
|
||||
{% for hostname, ip in values.items() %}
|
||||
{{hostname}}:
|
||||
ip: {{ip}}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
@@ -24,6 +24,7 @@
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{% if node_types %}
|
||||
node_data:
|
||||
{% for node_type, host_values in node_types.items() %}
|
||||
{% for hostname, details in host_values.items() %}
|
||||
@@ -33,3 +34,6 @@ node_data:
|
||||
role: {{node_type}}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
{% else %}
|
||||
node_data: False
|
||||
{% endif %}
|
||||
|
||||
@@ -18,16 +18,22 @@ base:
|
||||
- telegraf.adv_telegraf
|
||||
- versionlock.soc_versionlock
|
||||
- versionlock.adv_versionlock
|
||||
- soc.license
|
||||
|
||||
'* and not *_desktop':
|
||||
- firewall.soc_firewall
|
||||
- firewall.adv_firewall
|
||||
- nginx.soc_nginx
|
||||
- nginx.adv_nginx
|
||||
- node_data.ips
|
||||
|
||||
'*_manager or *_managersearch':
|
||||
'salt-cloud:driver:libvirt':
|
||||
- match: grain
|
||||
- vm.soc_vm
|
||||
- vm.adv_vm
|
||||
|
||||
'*_manager or *_managersearch or *_managerhype':
|
||||
- match: compound
|
||||
- node_data.ips
|
||||
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
|
||||
- elasticsearch.auth
|
||||
{% endif %}
|
||||
@@ -44,7 +50,6 @@ base:
|
||||
- logstash.adv_logstash
|
||||
- soc.soc_soc
|
||||
- soc.adv_soc
|
||||
- soc.license
|
||||
- kibana.soc_kibana
|
||||
- kibana.adv_kibana
|
||||
- kratos.soc_kratos
|
||||
@@ -70,6 +75,9 @@ base:
|
||||
- kafka.nodes
|
||||
- kafka.soc_kafka
|
||||
- kafka.adv_kafka
|
||||
- hypervisor.nodes
|
||||
- hypervisor.soc_hypervisor
|
||||
- hypervisor.adv_hypervisor
|
||||
- stig.soc_stig
|
||||
|
||||
'*_sensor':
|
||||
@@ -87,9 +95,9 @@ base:
|
||||
- minions.{{ grains.id }}
|
||||
- minions.adv_{{ grains.id }}
|
||||
- stig.soc_stig
|
||||
- soc.license
|
||||
|
||||
'*_eval':
|
||||
- node_data.ips
|
||||
- secrets
|
||||
- healthcheck.eval
|
||||
- elasticsearch.index_templates
|
||||
@@ -113,7 +121,6 @@ base:
|
||||
- idstools.adv_idstools
|
||||
- soc.soc_soc
|
||||
- soc.adv_soc
|
||||
- soc.license
|
||||
- kibana.soc_kibana
|
||||
- kibana.adv_kibana
|
||||
- strelka.soc_strelka
|
||||
@@ -138,6 +145,7 @@ base:
|
||||
- minions.adv_{{ grains.id }}
|
||||
|
||||
'*_standalone':
|
||||
- node_data.ips
|
||||
- logstash.nodes
|
||||
- logstash.soc_logstash
|
||||
- logstash.adv_logstash
|
||||
@@ -172,7 +180,6 @@ base:
|
||||
- manager.adv_manager
|
||||
- soc.soc_soc
|
||||
- soc.adv_soc
|
||||
- soc.license
|
||||
- kibana.soc_kibana
|
||||
- kibana.adv_kibana
|
||||
- strelka.soc_strelka
|
||||
@@ -238,7 +245,6 @@ base:
|
||||
- minions.{{ grains.id }}
|
||||
- minions.adv_{{ grains.id }}
|
||||
- stig.soc_stig
|
||||
- soc.license
|
||||
- kafka.nodes
|
||||
- kafka.soc_kafka
|
||||
- kafka.adv_kafka
|
||||
@@ -256,10 +262,12 @@ base:
|
||||
- minions.adv_{{ grains.id }}
|
||||
- kafka.nodes
|
||||
- kafka.soc_kafka
|
||||
- kafka.adv_kafka
|
||||
- soc.license
|
||||
- stig.soc_stig
|
||||
- elasticfleet.soc_elasticfleet
|
||||
- elasticfleet.adv_elasticfleet
|
||||
|
||||
'*_import':
|
||||
- node_data.ips
|
||||
- secrets
|
||||
- elasticsearch.index_templates
|
||||
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
|
||||
@@ -280,7 +288,6 @@ base:
|
||||
- manager.adv_manager
|
||||
- soc.soc_soc
|
||||
- soc.adv_soc
|
||||
- soc.license
|
||||
- kibana.soc_kibana
|
||||
- kibana.adv_kibana
|
||||
- backup.soc_backup
|
||||
@@ -305,6 +312,7 @@ base:
|
||||
- minions.adv_{{ grains.id }}
|
||||
|
||||
'*_fleet':
|
||||
- node_data.ips
|
||||
- backup.soc_backup
|
||||
- backup.adv_backup
|
||||
- logstash.nodes
|
||||
@@ -314,9 +322,15 @@ base:
|
||||
- elasticfleet.adv_elasticfleet
|
||||
- minions.{{ grains.id }}
|
||||
- minions.adv_{{ grains.id }}
|
||||
- stig.soc_stig
|
||||
|
||||
'*_hypervisor':
|
||||
- minions.{{ grains.id }}
|
||||
- minions.adv_{{ grains.id }}
|
||||
- stig.soc_stig
|
||||
|
||||
'*_desktop':
|
||||
- minions.{{ grains.id }}
|
||||
- minions.adv_{{ grains.id }}
|
||||
- stig.soc_stig
|
||||
- soc.license
|
||||
|
||||
|
||||
246
salt/_modules/qcow2.py
Normal file
246
salt/_modules/qcow2.py
Normal file
@@ -0,0 +1,246 @@
|
||||
#!py
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
"""
|
||||
Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
|
||||
for modifying network configurations within QCOW2 images and adjusting virtual machine hardware settings.
|
||||
It serves as a Salt interface to the so-qcow2-modify-network and so-kvm-modify-hardware scripts.
|
||||
|
||||
The module offers two main capabilities:
|
||||
1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
|
||||
2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
|
||||
|
||||
This module is intended to work with Security Onion's virtualization infrastructure and is typically
|
||||
used in conjunction with salt-cloud for VM provisioning and management.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import subprocess
|
||||
import shlex
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__virtualname__ = 'qcow2'
|
||||
|
||||
def __virtual__():
|
||||
return __virtualname__
|
||||
|
||||
def modify_network_config(image, interface, mode, vm_name, ip4=None, gw4=None, dns4=None, search4=None):
|
||||
'''
|
||||
Usage:
|
||||
salt '*' qcow2.modify_network_config image=<path> interface=<iface> mode=<mode> vm_name=<name> [ip4=<addr>] [gw4=<addr>] [dns4=<servers>] [search4=<domain>]
|
||||
|
||||
Options:
|
||||
image
|
||||
Path to the QCOW2 image file that will be modified
|
||||
interface
|
||||
Network interface name to configure (e.g., 'enp1s0')
|
||||
mode
|
||||
Network configuration mode, either 'dhcp4' or 'static4'
|
||||
vm_name
|
||||
Full name of the VM (hostname_role)
|
||||
ip4
|
||||
IPv4 address with CIDR notation (e.g., '192.168.1.10/24')
|
||||
Required when mode='static4'
|
||||
gw4
|
||||
IPv4 gateway address (e.g., '192.168.1.1')
|
||||
Required when mode='static4'
|
||||
dns4
|
||||
Comma-separated list of IPv4 DNS servers (e.g., '8.8.8.8,8.8.4.4')
|
||||
Optional for both DHCP and static configurations
|
||||
search4
|
||||
DNS search domain for IPv4 (e.g., 'example.local')
|
||||
Optional for both DHCP and static configurations
|
||||
|
||||
Examples:
|
||||
1. **Configure DHCP:**
|
||||
```bash
|
||||
salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='dhcp4'
|
||||
```
|
||||
This configures enp1s0 to use DHCP for IP assignment
|
||||
|
||||
2. **Configure Static IP:**
|
||||
```bash
|
||||
salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='static4' ip4='192.168.1.10/24' gw4='192.168.1.1' dns4='192.168.1.1,8.8.8.8' search4='example.local'
|
||||
```
|
||||
This sets a static IP configuration with DNS servers and search domain
|
||||
|
||||
Notes:
|
||||
- The QCOW2 image must be accessible and writable by the salt minion
|
||||
- The image should not be in use by a running VM when modified
|
||||
- Network changes take effect on next VM boot
|
||||
- Requires so-qcow2-modify-network script to be installed
|
||||
|
||||
Description:
|
||||
This function modifies network configuration within a QCOW2 image file by executing
|
||||
the so-qcow2-modify-network script. It supports both DHCP and static IPv4 configuration.
|
||||
The script mounts the image, modifies the network configuration files, and unmounts
|
||||
safely. All operations are logged for troubleshooting purposes.
|
||||
|
||||
Exit Codes:
|
||||
0: Success
|
||||
1: Invalid parameters or configuration
|
||||
2: Image access or mounting error
|
||||
3: Network configuration error
|
||||
4: System command error
|
||||
255: Unexpected error
|
||||
|
||||
Logging:
|
||||
- All operations are logged to the salt minion log
|
||||
- Log entries are prefixed with 'qcow2 module:'
|
||||
- Error conditions include detailed error messages and stack traces
|
||||
- Success/failure status is logged for verification
|
||||
'''
|
||||
|
||||
cmd = ['/usr/sbin/so-qcow2-modify-network', '-I', image, '-i', interface, '-n', vm_name]
|
||||
|
||||
if mode.lower() == 'dhcp4':
|
||||
cmd.append('--dhcp4')
|
||||
elif mode.lower() == 'static4':
|
||||
cmd.append('--static4')
|
||||
if not ip4 or not gw4:
|
||||
raise ValueError('Both ip4 and gw4 are required for static configuration.')
|
||||
cmd.extend(['--ip4', ip4, '--gw4', gw4])
|
||||
if dns4:
|
||||
cmd.extend(['--dns4', dns4])
|
||||
if search4:
|
||||
cmd.extend(['--search4', search4])
|
||||
else:
|
||||
raise ValueError("Invalid mode '{}'. Expected 'dhcp4' or 'static4'.".format(mode))
|
||||
|
||||
log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
|
||||
|
||||
try:
|
||||
result = subprocess.run(cmd, capture_output=True, text=True, check=False)
|
||||
ret = {
|
||||
'retcode': result.returncode,
|
||||
'stdout': result.stdout,
|
||||
'stderr': result.stderr
|
||||
}
|
||||
if result.returncode != 0:
|
||||
log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
|
||||
else:
|
||||
log.info('qcow2 module: Script executed successfully.')
|
||||
return ret
|
||||
except Exception as e:
|
||||
log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
|
||||
raise
|
||||
|
||||
def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False):
    '''
    Modify the hardware configuration (CPU, memory, PCI passthrough) of a KVM
    virtual machine via the so-kvm-modify-hardware helper script.

    Usage:
        salt '*' qcow2.modify_hardware_config vm_name=<name> [cpu=<count>] [memory=<size>] [pci=<id>] [pci=<id>] [start=<bool>]

    Options:
        vm_name
            Name of the virtual machine to modify
        cpu
            Number of virtual CPUs to assign (positive integer)
            Optional - VM's current CPU count retained if not specified
        memory
            Amount of memory to assign in MiB (positive integer; 1024 = 1GB)
            Optional - VM's current memory size retained if not specified
        pci
            PCI hardware ID(s) to passthrough to the VM (e.g., '0000:c7:00.0')
            Accepts a single ID, a comma-separated string, or a list
            Optional - no PCI passthrough if not specified
        start
            Boolean flag to start the VM after modification
            Optional - defaults to False

    Returns:
        dict with 'retcode', 'stdout' and 'stderr' from the helper script.

    Raises:
        ValueError: if cpu or memory is provided but is not a positive integer.
        Exception: re-raised if the helper script cannot be executed at all.

    Notes:
        - VM must be stopped before modification unless only the start flag is set
        - PCI devices must be available and not in use by the host
        - CPU count should align with host capabilities
        - Requires so-kvm-modify-hardware script to be installed

    Script exit codes: 0 success, 1 invalid parameters, 2 VM state error
    (running when it should be stopped), 3 hardware configuration error,
    4 system command error, 255 unexpected error.

    Logging:
        - All operations are logged to the salt minion log
        - Log entries are prefixed with 'qcow2 module:'
    '''

    cmd = ['/usr/sbin/so-kvm-modify-hardware', '-v', vm_name]

    # bool is a subclass of int, so isinstance(x, int) alone would accept
    # True/False and pass the literal strings 'True'/'False' to the script;
    # reject booleans explicitly.
    if cpu is not None:
        if isinstance(cpu, int) and not isinstance(cpu, bool) and cpu > 0:
            cmd.extend(['-c', str(cpu)])
        else:
            raise ValueError('cpu must be a positive integer.')
    if memory is not None:
        if isinstance(memory, int) and not isinstance(memory, bool) and memory > 0:
            cmd.extend(['-m', str(memory)])
        else:
            raise ValueError('memory must be a positive integer.')
    if pci:
        # Normalize PCI IDs: a comma-separated string, a list, or a single
        # scalar value all become a flat list of device IDs.
        if isinstance(pci, str):
            devices = [dev.strip() for dev in pci.split(',') if dev.strip()]
        elif isinstance(pci, list):
            devices = pci
        else:
            devices = [pci]

        # Add each device with its own -p flag
        for device in devices:
            cmd.extend(['-p', str(device)])
    if start:
        cmd.append('-s')

    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
        ret = {
            'retcode': result.returncode,
            'stdout': result.stdout,
            'stderr': result.stderr
        }
        if result.returncode != 0:
            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
        else:
            log.info('qcow2 module: Script executed successfully.')
        return ret
    except Exception as e:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
        raise
|
||||
1092
salt/_runners/setup_hypervisor.py
Normal file
1092
salt/_runners/setup_hypervisor.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,264 +1,180 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
Elastic License 2.0. #}
|
||||
|
||||
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
|
||||
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
|
||||
{% set saltversion = saltversion.salt.minion.version %}
|
||||
|
||||
{# this is the list we are returning from this map file, it gets built below #}
|
||||
{% set allowed_states= [] %}
|
||||
{# Define common state groups to reduce redundancy #}
|
||||
{% set base_states = [
|
||||
'common',
|
||||
'patch.os.schedule',
|
||||
'motd',
|
||||
'salt.minion-check',
|
||||
'sensoroni',
|
||||
'salt.lasthighstate',
|
||||
'salt.minion'
|
||||
] %}
|
||||
|
||||
{% set ssl_states = [
|
||||
'ssl',
|
||||
'telegraf',
|
||||
'firewall',
|
||||
'schedule',
|
||||
'docker_clean'
|
||||
] %}
|
||||
|
||||
{% set manager_states = [
|
||||
'salt.master',
|
||||
'ca',
|
||||
'registry',
|
||||
'manager',
|
||||
'nginx',
|
||||
'influxdb',
|
||||
'soc',
|
||||
'kratos',
|
||||
'hydra',
|
||||
'elasticfleet',
|
||||
'elastic-fleet-package-registry',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
'utility'
|
||||
] %}
|
||||
|
||||
{% set sensor_states = [
|
||||
'pcap',
|
||||
'suricata',
|
||||
'healthcheck',
|
||||
'tcpreplay',
|
||||
'zeek',
|
||||
'strelka'
|
||||
] %}
|
||||
|
||||
{% set kafka_states = [
|
||||
'kafka'
|
||||
] %}
|
||||
|
||||
{% set stig_states = [
|
||||
'stig'
|
||||
] %}
|
||||
|
||||
{% set elastic_stack_states = [
|
||||
'elasticsearch',
|
||||
'elasticsearch.auth',
|
||||
'kibana',
|
||||
'kibana.secrets',
|
||||
'elastalert',
|
||||
'logstash',
|
||||
'redis'
|
||||
] %}
|
||||
|
||||
{# Initialize the allowed_states list #}
|
||||
{% set allowed_states = [] %}
|
||||
|
||||
{% if grains.saltversion | string == saltversion | string %}
|
||||
{# Map role-specific states #}
|
||||
{% set role_states = {
|
||||
'so-eval': (
|
||||
ssl_states +
|
||||
manager_states +
|
||||
sensor_states +
|
||||
elastic_stack_states | reject('equalto', 'logstash') | list
|
||||
),
|
||||
'so-heavynode': (
|
||||
ssl_states +
|
||||
sensor_states +
|
||||
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
|
||||
),
|
||||
'so-idh': (
|
||||
ssl_states +
|
||||
['idh']
|
||||
),
|
||||
'so-import': (
|
||||
ssl_states +
|
||||
manager_states +
|
||||
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
|
||||
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
|
||||
),
|
||||
'so-manager': (
|
||||
ssl_states +
|
||||
manager_states +
|
||||
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
|
||||
stig_states +
|
||||
kafka_states +
|
||||
elastic_stack_states
|
||||
),
|
||||
'so-managerhype': (
|
||||
ssl_states +
|
||||
manager_states +
|
||||
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
|
||||
stig_states +
|
||||
kafka_states +
|
||||
elastic_stack_states
|
||||
),
|
||||
'so-managersearch': (
|
||||
ssl_states +
|
||||
manager_states +
|
||||
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
|
||||
stig_states +
|
||||
kafka_states +
|
||||
elastic_stack_states
|
||||
),
|
||||
'so-searchnode': (
|
||||
ssl_states +
|
||||
['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
|
||||
stig_states
|
||||
),
|
||||
'so-standalone': (
|
||||
ssl_states +
|
||||
manager_states +
|
||||
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
|
||||
sensor_states +
|
||||
stig_states +
|
||||
kafka_states +
|
||||
elastic_stack_states
|
||||
),
|
||||
'so-sensor': (
|
||||
ssl_states +
|
||||
sensor_states +
|
||||
['nginx'] +
|
||||
stig_states
|
||||
),
|
||||
'so-fleet': (
|
||||
ssl_states +
|
||||
stig_states +
|
||||
['logstash', 'nginx', 'healthcheck', 'elasticfleet']
|
||||
),
|
||||
'so-receiver': (
|
||||
ssl_states +
|
||||
kafka_states +
|
||||
stig_states +
|
||||
['logstash', 'redis']
|
||||
),
|
||||
'so-hypervisor': (
|
||||
ssl_states +
|
||||
stig_states +
|
||||
['hypervisor', 'libvirt']
|
||||
),
|
||||
'so-desktop': (
|
||||
['ssl', 'docker_clean', 'telegraf'] +
|
||||
stig_states
|
||||
)
|
||||
} %}
|
||||
|
||||
{% set allowed_states= salt['grains.filter_by']({
|
||||
'so-eval': [
|
||||
'salt.master',
|
||||
'ca',
|
||||
'ssl',
|
||||
'registry',
|
||||
'manager',
|
||||
'nginx',
|
||||
'telegraf',
|
||||
'influxdb',
|
||||
'soc',
|
||||
'kratos',
|
||||
'hydra',
|
||||
'elasticfleet',
|
||||
'elastic-fleet-package-registry',
|
||||
'firewall',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
'healthcheck',
|
||||
'pcap',
|
||||
'suricata',
|
||||
'utility',
|
||||
'schedule',
|
||||
'tcpreplay',
|
||||
'docker_clean'
|
||||
],
|
||||
'so-heavynode': [
|
||||
'ssl',
|
||||
'nginx',
|
||||
'telegraf',
|
||||
'firewall',
|
||||
'pcap',
|
||||
'suricata',
|
||||
'healthcheck',
|
||||
'elasticagent',
|
||||
'schedule',
|
||||
'tcpreplay',
|
||||
'docker_clean'
|
||||
],
|
||||
'so-idh': [
|
||||
'ssl',
|
||||
'telegraf',
|
||||
'firewall',
|
||||
'idh',
|
||||
'schedule',
|
||||
'docker_clean'
|
||||
],
|
||||
'so-import': [
|
||||
'salt.master',
|
||||
'ca',
|
||||
'ssl',
|
||||
'registry',
|
||||
'manager',
|
||||
'nginx',
|
||||
'strelka.manager',
|
||||
'soc',
|
||||
'kratos',
|
||||
'hydra',
|
||||
'influxdb',
|
||||
'telegraf',
|
||||
'firewall',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
'pcap',
|
||||
'utility',
|
||||
'suricata',
|
||||
'zeek',
|
||||
'schedule',
|
||||
'tcpreplay',
|
||||
'docker_clean',
|
||||
'elasticfleet',
|
||||
'elastic-fleet-package-registry'
|
||||
],
|
||||
'so-manager': [
|
||||
'salt.master',
|
||||
'ca',
|
||||
'ssl',
|
||||
'registry',
|
||||
'manager',
|
||||
'nginx',
|
||||
'telegraf',
|
||||
'influxdb',
|
||||
'strelka.manager',
|
||||
'soc',
|
||||
'kratos',
|
||||
'hydra',
|
||||
'elasticfleet',
|
||||
'elastic-fleet-package-registry',
|
||||
'firewall',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
'utility',
|
||||
'schedule',
|
||||
'docker_clean',
|
||||
'stig',
|
||||
'kafka'
|
||||
],
|
||||
'so-managersearch': [
|
||||
'salt.master',
|
||||
'ca',
|
||||
'ssl',
|
||||
'registry',
|
||||
'nginx',
|
||||
'telegraf',
|
||||
'influxdb',
|
||||
'strelka.manager',
|
||||
'soc',
|
||||
'kratos',
|
||||
'hydra',
|
||||
'elastic-fleet-package-registry',
|
||||
'elasticfleet',
|
||||
'firewall',
|
||||
'manager',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
'utility',
|
||||
'schedule',
|
||||
'docker_clean',
|
||||
'stig',
|
||||
'kafka'
|
||||
],
|
||||
'so-searchnode': [
|
||||
'ssl',
|
||||
'nginx',
|
||||
'telegraf',
|
||||
'firewall',
|
||||
'schedule',
|
||||
'docker_clean',
|
||||
'stig',
|
||||
'kafka.ca',
|
||||
'kafka.ssl'
|
||||
],
|
||||
'so-standalone': [
|
||||
'salt.master',
|
||||
'ca',
|
||||
'ssl',
|
||||
'registry',
|
||||
'manager',
|
||||
'nginx',
|
||||
'telegraf',
|
||||
'influxdb',
|
||||
'soc',
|
||||
'kratos',
|
||||
'hydra',
|
||||
'elastic-fleet-package-registry',
|
||||
'elasticfleet',
|
||||
'firewall',
|
||||
'idstools',
|
||||
'suricata.manager',
|
||||
'pcap',
|
||||
'suricata',
|
||||
'healthcheck',
|
||||
'utility',
|
||||
'schedule',
|
||||
'tcpreplay',
|
||||
'docker_clean',
|
||||
'stig',
|
||||
'kafka'
|
||||
],
|
||||
'so-sensor': [
|
||||
'ssl',
|
||||
'telegraf',
|
||||
'firewall',
|
||||
'nginx',
|
||||
'pcap',
|
||||
'suricata',
|
||||
'healthcheck',
|
||||
'schedule',
|
||||
'tcpreplay',
|
||||
'docker_clean',
|
||||
'stig'
|
||||
],
|
||||
'so-fleet': [
|
||||
'ssl',
|
||||
'telegraf',
|
||||
'firewall',
|
||||
'logstash',
|
||||
'nginx',
|
||||
'healthcheck',
|
||||
'schedule',
|
||||
'elasticfleet',
|
||||
'docker_clean'
|
||||
],
|
||||
'so-receiver': [
|
||||
'ssl',
|
||||
'telegraf',
|
||||
'firewall',
|
||||
'schedule',
|
||||
'docker_clean',
|
||||
'kafka',
|
||||
'stig'
|
||||
],
|
||||
'so-desktop': [
|
||||
'ssl',
|
||||
'docker_clean',
|
||||
'telegraf',
|
||||
'stig'
|
||||
],
|
||||
}, grain='role') %}
|
||||
|
||||
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
|
||||
{% do allowed_states.append('zeek') %}
|
||||
{%- endif %}
|
||||
|
||||
{% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
|
||||
{% do allowed_states.append('strelka') %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
|
||||
{% do allowed_states.append('elasticsearch') %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
|
||||
{% do allowed_states.append('elasticsearch.auth') %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
|
||||
{% do allowed_states.append('kibana') %}
|
||||
{% do allowed_states.append('kibana.secrets') %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
|
||||
{% do allowed_states.append('elastalert') %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
|
||||
{% do allowed_states.append('logstash') %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
|
||||
{% do allowed_states.append('redis') %}
|
||||
{% endif %}
|
||||
|
||||
{# all nodes on the right salt version can run the following states #}
|
||||
{% do allowed_states.append('common') %}
|
||||
{% do allowed_states.append('patch.os.schedule') %}
|
||||
{% do allowed_states.append('motd') %}
|
||||
{% do allowed_states.append('salt.minion-check') %}
|
||||
{% do allowed_states.append('sensoroni') %}
|
||||
{% do allowed_states.append('salt.lasthighstate') %}
|
||||
{# Get states for the current role #}
|
||||
{% if grains.role in role_states %}
|
||||
{% set allowed_states = role_states[grains.role] %}
|
||||
{% endif %}
|
||||
|
||||
{# Add base states that apply to all roles #}
|
||||
{% for state in base_states %}
|
||||
{% do allowed_states.append(state) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
|
||||
{# Add airgap state if needed #}
|
||||
{% if ISAIRGAP %}
|
||||
{% do allowed_states.append('airgap') %}
|
||||
{% do allowed_states.append('airgap') %}
|
||||
{% endif %}
|
||||
|
||||
{# all nodes can always run salt.minion state #}
|
||||
{% do allowed_states.append('salt.minion') %}
|
||||
|
||||
@@ -11,6 +11,10 @@ TODAY=$(date '+%Y_%m_%d')
|
||||
BACKUPDIR={{ DESTINATION }}
|
||||
BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar"
|
||||
MAXBACKUPS=7
|
||||
EXCLUSIONS=(
|
||||
"--exclude=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers"
|
||||
)
|
||||
|
||||
|
||||
# Create backup dir if it does not exist
|
||||
mkdir -p /nsm/backup
|
||||
@@ -23,7 +27,7 @@ if [ ! -f $BACKUPFILE ]; then
|
||||
|
||||
# Loop through all paths defined in global.sls, and append them to backup file
|
||||
{%- for LOCATION in BACKUPLOCATIONS %}
|
||||
tar -rf $BACKUPFILE {{ LOCATION }}
|
||||
tar -rf $BACKUPFILE "${EXCLUSIONS[@]}" {{ LOCATION }}
|
||||
{%- endfor %}
|
||||
|
||||
fi
|
||||
|
||||
@@ -106,7 +106,7 @@ Etc/UTC:
|
||||
timezone.system
|
||||
|
||||
# Sync curl configuration for Elasticsearch authentication
|
||||
{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
|
||||
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-searchnode'] %}
|
||||
elastic_curl_config:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elasticsearch/curl.config
|
||||
@@ -129,6 +129,10 @@ common_sbin:
|
||||
- group: 939
|
||||
- file_mode: 755
|
||||
- show_changes: False
|
||||
{% if GLOBALS.role == 'so-heavynode' %}
|
||||
- exclude_pat:
|
||||
- so-pcap-import
|
||||
{% endif %}
|
||||
|
||||
common_sbin_jinja:
|
||||
file.recurse:
|
||||
@@ -139,6 +143,20 @@ common_sbin_jinja:
|
||||
- file_mode: 755
|
||||
- template: jinja
|
||||
- show_changes: False
|
||||
{% if GLOBALS.role == 'so-heavynode' %}
|
||||
- exclude_pat:
|
||||
- so-import-pcap
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.role == 'so-heavynode' %}
|
||||
remove_so-pcap-import_heavynode:
|
||||
file.absent:
|
||||
- name: /usr/sbin/so-pcap-import
|
||||
|
||||
remove_so-import-pcap_heavynode:
|
||||
file.absent:
|
||||
- name: /usr/sbin/so-import-pcap
|
||||
{% endif %}
|
||||
|
||||
{% if not GLOBALS.is_manager%}
|
||||
# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
{% if GLOBALS.os_family == 'Debian' %}
|
||||
# we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init
|
||||
# since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined
|
||||
{% if grains.os_family == 'Debian' %}
|
||||
commonpkgs:
|
||||
pkg.installed:
|
||||
- skip_suggestions: True
|
||||
@@ -46,7 +46,7 @@ python-rich:
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.os_family == 'RedHat' %}
|
||||
{% if grains.os_family == 'RedHat' %}
|
||||
|
||||
remove_mariadb:
|
||||
pkg.removed:
|
||||
|
||||
@@ -64,6 +64,12 @@ copy_so-repo-sync_manager_tools_sbin:
|
||||
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
|
||||
- preserve: True
|
||||
|
||||
copy_bootstrap-salt_manager_tools_sbin:
|
||||
file.copy:
|
||||
- name: /opt/so/saltstack/default/salt/salt/scripts/bootstrap-salt.sh
|
||||
- source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh
|
||||
- preserve: True
|
||||
|
||||
# This section is used to put the new script in place so that it can be called during soup.
|
||||
# It is faster than calling the states that normally manage them to put them in place.
|
||||
copy_so-common_sbin:
|
||||
@@ -108,6 +114,13 @@ copy_so-repo-sync_sbin:
|
||||
- force: True
|
||||
- preserve: True
|
||||
|
||||
copy_bootstrap-salt_sbin:
|
||||
file.copy:
|
||||
- name: /usr/sbin/bootstrap-salt.sh
|
||||
- source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh
|
||||
- force: True
|
||||
- preserve: True
|
||||
|
||||
{# this is added in 2.4.120 to remove salt repo files pointing to saltproject.io to accomodate the move to broadcom and new bootstrap-salt script #}
|
||||
{% if salt['pkg.version_cmp'](SOVERSION, '2.4.120') == -1 %}
|
||||
{% set saltrepofile = '/etc/yum.repos.d/salt.repo' %}
|
||||
|
||||
@@ -99,6 +99,17 @@ add_interface_bond0() {
|
||||
fi
|
||||
}
|
||||
|
||||
airgap_playbooks() {
|
||||
SRC_DIR=$1
|
||||
# Copy playbooks if using airgap
|
||||
mkdir -p /nsm/airgap-resources
|
||||
# Purge old airgap playbooks to ensure SO only uses the latest released playbooks
|
||||
rm -fr /nsm/airgap-resources/playbooks
|
||||
tar xf $SRC_DIR/airgap-resources/playbooks.tgz -C /nsm/airgap-resources/
|
||||
chown -R socore:socore /nsm/airgap-resources/playbooks
|
||||
git config --global --add safe.directory /nsm/airgap-resources/playbooks
|
||||
}
|
||||
|
||||
check_container() {
|
||||
docker ps | grep "$1:" > /dev/null 2>&1
|
||||
return $?
|
||||
@@ -299,7 +310,8 @@ fail() {
|
||||
|
||||
get_agent_count() {
|
||||
if [ -f /opt/so/log/agents/agentstatus.log ]; then
|
||||
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
|
||||
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}' | sed 's/,//')
|
||||
[[ -z "$AGENTCOUNT" ]] && AGENTCOUNT="0"
|
||||
else
|
||||
AGENTCOUNT=0
|
||||
fi
|
||||
|
||||
@@ -45,7 +45,7 @@ def check_for_fps():
|
||||
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
|
||||
if result.returncode == 0:
|
||||
fps = 1
|
||||
except FileNotFoundError:
|
||||
except:
|
||||
fn = '/proc/sys/crypto/' + feat_full + '_enabled'
|
||||
try:
|
||||
with open(fn, 'r') as f:
|
||||
|
||||
@@ -4,22 +4,16 @@
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
|
||||
|
||||
import sys, argparse, re, docker
|
||||
import sys, argparse, re, subprocess, json
|
||||
from packaging.version import Version, InvalidVersion
|
||||
from itertools import groupby, chain
|
||||
|
||||
|
||||
def get_image_name(string) -> str:
    """Return the image reference *string* with its trailing ':tag' removed.

    Yields an empty string when no ':' is present at all.
    """
    name, _sep, _tag = string.rpartition(':')
    return name
|
||||
|
||||
|
||||
def get_so_image_basename(string) -> str:
    """Return the part of the image name after the last '/so-' marker.

    If the name contains no '/so-', the full (version-stripped) name is
    returned unchanged.
    """
    return get_image_name(string).rpartition('/so-')[2]
|
||||
|
||||
|
||||
def get_image_version(string) -> str:
|
||||
ver = string.split(':')[-1]
|
||||
if ver == 'latest':
|
||||
@@ -35,56 +29,75 @@ def get_image_version(string) -> str:
|
||||
return '999999.9.9'
|
||||
return ver
|
||||
|
||||
def run_command(command):
    """Run *command* through the shell and return its captured stdout.

    On a non-zero exit status the failure is reported on stderr and the
    whole script terminates with exit code 1.
    """
    completed = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)
    if completed.returncode == 0:
        return completed.stdout
    print(f"Error executing command: {command}", file=sys.stderr)
    print(f"Error message: {completed.stderr}", file=sys.stderr)
    exit(1)
|
||||
|
||||
def main(quiet):
|
||||
client = docker.from_env()
|
||||
|
||||
# Prune old/stopped containers
|
||||
if not quiet: print('Pruning old containers')
|
||||
client.containers.prune()
|
||||
|
||||
image_list = client.images.list(filters={ 'dangling': False })
|
||||
|
||||
# Map list of image objects to flattened list of tags (format: "name:version")
|
||||
tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
|
||||
|
||||
# Filter to only SO images (base name begins with "so-")
|
||||
tag_list = list(filter(lambda x: re.match(r'^.*\/so-[^\/]*$', get_image_name(x)), tag_list))
|
||||
|
||||
# Group tags into lists by base name (sort by same projection first)
|
||||
tag_list.sort(key=lambda x: get_so_image_basename(x))
|
||||
grouped_tag_lists = [ list(it) for _, it in groupby(tag_list, lambda x: get_so_image_basename(x)) ]
|
||||
|
||||
no_prunable = True
|
||||
for t_list in grouped_tag_lists:
|
||||
try:
|
||||
# Group tags by version, in case multiple images exist with the same version string
|
||||
t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
|
||||
grouped_t_list = [ list(it) for _,it in groupby(t_list, lambda x: get_image_version(x)) ]
|
||||
|
||||
# Keep the 2 most current version groups
|
||||
if len(grouped_t_list) <= 2:
|
||||
continue
|
||||
else:
|
||||
no_prunable = False
|
||||
for group in grouped_t_list[2:]:
|
||||
for tag in group:
|
||||
if not quiet: print(f'Removing image {tag}')
|
||||
# Prune old/stopped containers using docker CLI
|
||||
if not quiet: print('Pruning old containers')
|
||||
run_command('docker container prune -f')
|
||||
|
||||
# Get list of images using docker CLI
|
||||
images_json = run_command('docker images --format "{{json .}}"')
|
||||
|
||||
# Parse the JSON output
|
||||
image_list = []
|
||||
for line in images_json.strip().split('\n'):
|
||||
if line: # Skip empty lines
|
||||
image_list.append(json.loads(line))
|
||||
|
||||
# Extract tags in the format "name:version"
|
||||
tag_list = []
|
||||
for img in image_list:
|
||||
# Skip dangling images
|
||||
if img.get('Repository') != "<none>" and img.get('Tag') != "<none>":
|
||||
tag = f"{img.get('Repository')}:{img.get('Tag')}"
|
||||
# Filter to only SO images (base name begins with "so-")
|
||||
if re.match(r'^.*\/so-[^\/]*$', get_image_name(tag)):
|
||||
tag_list.append(tag)
|
||||
|
||||
# Group tags into lists by base name (sort by same projection first)
|
||||
tag_list.sort(key=lambda x: get_so_image_basename(x))
|
||||
grouped_tag_lists = [list(it) for k, it in groupby(tag_list, lambda x: get_so_image_basename(x))]
|
||||
|
||||
no_prunable = True
|
||||
for t_list in grouped_tag_lists:
|
||||
try:
|
||||
client.images.remove(tag, force=True)
|
||||
except docker.errors.ClientError as e:
|
||||
print(f'Could not remove image {tag}, continuing...')
|
||||
except (docker.errors.APIError, InvalidVersion) as e:
|
||||
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
|
||||
exit(1)
|
||||
# Group tags by version, in case multiple images exist with the same version string
|
||||
t_list.sort(key=lambda x: Version(get_image_version(x)), reverse=True)
|
||||
grouped_t_list = [list(it) for k, it in groupby(t_list, lambda x: get_image_version(x))]
|
||||
# Keep the 2 most current version groups
|
||||
if len(grouped_t_list) <= 2:
|
||||
continue
|
||||
else:
|
||||
no_prunable = False
|
||||
for group in grouped_t_list[2:]:
|
||||
for tag in group:
|
||||
if not quiet: print(f'Removing image {tag}')
|
||||
try:
|
||||
run_command(f'docker rmi -f {tag}')
|
||||
except Exception as e:
|
||||
print(f'Could not remove image {tag}, continuing...')
|
||||
except (InvalidVersion) as e:
|
||||
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
|
||||
exit(1)
|
||||
except Exception as e:
|
||||
print('Unhandled exception occurred:')
|
||||
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
|
||||
exit(1)
|
||||
|
||||
if no_prunable and not quiet:
|
||||
print('No Security Onion images to prune')
|
||||
|
||||
except Exception as e:
|
||||
print('Unhandled exception occurred:')
|
||||
print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
|
||||
exit(1)
|
||||
|
||||
if no_prunable and not quiet:
|
||||
print('No Security Onion images to prune')
|
||||
|
||||
print(f"Error: {e}", file=sys.stderr)
|
||||
exit(1)
|
||||
|
||||
if __name__ == "__main__":
|
||||
main_parser = argparse.ArgumentParser(add_help=False)
|
||||
|
||||
@@ -127,6 +127,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process already finished" # Telegraf script finished just as the auto kill timeout kicked in
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|No shard available" # Typical error when making a query before ES has finished loading all indices
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
|
||||
fi
|
||||
|
||||
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
|
||||
@@ -155,6 +157,9 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|request_unauthorized" # false positive (login failures to Hydra result in an 'error' log)
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index lifecycle policy" # false positive (elasticsearch policy names contain 'error')
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
|
||||
fi
|
||||
|
||||
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
|
||||
|
||||
53
salt/common/tools/sbin/so_logging_utils.py
Normal file
53
salt/common/tools/sbin/so_logging_utils.py
Normal file
@@ -0,0 +1,53 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
|
||||
def setup_logging(logger_name, log_file_path, log_level=logging.INFO, format_str='%(asctime)s - %(levelname)s - %(message)s'):
    """
    Sets up console + file logging for a script.

    Parameters:
        logger_name (str): The name of the logger.
        log_file_path (str): The file path for the log file.
        log_level (int): The logging level (e.g., logging.INFO, logging.DEBUG).
        format_str (str): The format string for log messages.

    Returns:
        logging.Logger: Configured logger object.

    Exits the process with status 1 if the log directory cannot be created.
    Safe to call repeatedly with the same logger_name: handlers are attached
    only on the first call (the level is updated on every call).
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(log_level)

    # Create directory for log file if it doesn't exist
    log_file_dir = os.path.dirname(log_file_path)
    if log_file_dir and not os.path.exists(log_file_dir):
        try:
            os.makedirs(log_file_dir)
        except OSError as e:
            print(f"Error creating directory {log_file_dir}: {e}")
            sys.exit(1)

    # Attach handlers only on the first call for this logger. Checking
    # logger.handlers (this logger's own handlers) instead of
    # logger.hasHandlers() matters: hasHandlers() also returns True when an
    # *ancestor* logger (e.g. the root logger) has handlers, which would
    # silently skip attaching the file handler. Creating the handlers inside
    # the guard also avoids opening - and leaking - a new file handle on
    # every repeated call.
    if not logger.handlers:
        c_handler = logging.StreamHandler()
        f_handler = logging.FileHandler(log_file_path)
        c_handler.setLevel(log_level)
        f_handler.setLevel(log_level)

        # Create formatter and add it to handlers
        formatter = logging.Formatter(format_str)
        c_handler.setFormatter(formatter)
        f_handler.setFormatter(formatter)

        logger.addHandler(c_handler)
        logger.addHandler(f_handler)

    return logger
|
||||
@@ -248,7 +248,7 @@ fi
|
||||
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
|
||||
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
|
||||
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
|
||||
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
|
||||
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.as.organization.name%20source.geo.country_name%20%7C%20groupby%20destination.as.organization.name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
|
||||
|
||||
status "Import complete!"
|
||||
status
|
||||
|
||||
@@ -0,0 +1,132 @@
|
||||
#!/opt/saltstack/salt/bin/python3
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
#
|
||||
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||
#
|
||||
# "You may not move, change, disable, or circumvent the license key functionality
|
||||
# in the software, and you may not remove or obscure any functionality in the
|
||||
# software that is protected by the license key."
|
||||
|
||||
{% if 'vrt' in salt['pillar.get']('features', []) -%}
|
||||
|
||||
"""
|
||||
Script for emitting VM deployment status events to the Salt event bus.
|
||||
|
||||
This script provides functionality to emit status events for VM deployment operations,
|
||||
used by various Security Onion VM management tools.
|
||||
|
||||
Usage:
|
||||
so-salt-emit-vm-deployment-status-event -v <vm_name> -H <hypervisor> -s <status>
|
||||
|
||||
Arguments:
|
||||
-v, --vm-name Name of the VM (hostname_role)
|
||||
-H, --hypervisor Name of the hypervisor
|
||||
-s, --status Current deployment status of the VM
|
||||
|
||||
Example:
|
||||
so-salt-emit-vm-deployment-status-event -v sensor1_sensor -H hypervisor1 -s "Creating"
|
||||
"""
|
||||
|
||||
import sys
|
||||
import argparse
|
||||
import logging
|
||||
import salt.client
|
||||
from typing import Dict, Any
|
||||
|
||||
# Configure logging
|
||||
logging.basicConfig(
|
||||
level=logging.INFO,
|
||||
format='%(asctime)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
def emit_event(vm_name: str, hypervisor: str, status: str) -> bool:
|
||||
"""
|
||||
Emit a VM deployment status event to the salt event bus.
|
||||
|
||||
Args:
|
||||
vm_name: Name of the VM (hostname_role)
|
||||
hypervisor: Name of the hypervisor
|
||||
status: Current deployment status of the VM
|
||||
|
||||
Returns:
|
||||
bool: True if event was sent successfully, False otherwise
|
||||
|
||||
Raises:
|
||||
ValueError: If status is not a valid deployment status
|
||||
"""
|
||||
log.info("Attempting to emit deployment event...")
|
||||
|
||||
try:
|
||||
caller = salt.client.Caller()
|
||||
event_data = {
|
||||
'vm_name': vm_name,
|
||||
'hypervisor': hypervisor,
|
||||
'status': status
|
||||
}
|
||||
|
||||
# Use consistent event tag structure
|
||||
event_tag = f'soc/dyanno/hypervisor/{status.lower()}'
|
||||
|
||||
ret = caller.cmd(
|
||||
'event.send',
|
||||
event_tag,
|
||||
event_data
|
||||
)
|
||||
|
||||
if not ret:
|
||||
log.error("Failed to emit VM deployment status event: %s", event_data)
|
||||
return False
|
||||
|
||||
log.info("Successfully emitted VM deployment status event: %s", event_data)
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
log.error("Error emitting VM deployment status event: %s", str(e))
|
||||
return False
|
||||
|
||||
def parse_args():
|
||||
"""Parse command line arguments."""
|
||||
parser = argparse.ArgumentParser(
|
||||
description='Emit VM deployment status events to the Salt event bus.'
|
||||
)
|
||||
parser.add_argument('-v', '--vm-name', required=True,
|
||||
help='Name of the VM (hostname_role)')
|
||||
parser.add_argument('-H', '--hypervisor', required=True,
|
||||
help='Name of the hypervisor')
|
||||
parser.add_argument('-s', '--status', required=True,
|
||||
help='Current deployment status of the VM')
|
||||
return parser.parse_args()
|
||||
|
||||
def main():
|
||||
"""Main entry point for the script."""
|
||||
try:
|
||||
args = parse_args()
|
||||
|
||||
success = emit_event(
|
||||
vm_name=args.vm_name,
|
||||
hypervisor=args.hypervisor,
|
||||
status=args.status
|
||||
)
|
||||
|
||||
if not success:
|
||||
sys.exit(1)
|
||||
|
||||
except Exception as e:
|
||||
log.error("Failed to emit status event: %s", str(e))
|
||||
sys.exit(1)
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
||||
{%- else -%}
|
||||
|
||||
echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
|
||||
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
|
||||
for more information about purchasing a license to enable this feature."
|
||||
|
||||
{% endif -%}
|
||||
@@ -200,6 +200,7 @@ docker:
|
||||
final_octet: 88
|
||||
port_bindings:
|
||||
- 0.0.0.0:9092:9092
|
||||
- 0.0.0.0:29092:29092
|
||||
- 0.0.0.0:9093:9093
|
||||
- 0.0.0.0:8778:8778
|
||||
custom_bind_mounts: []
|
||||
|
||||
@@ -9,3 +9,6 @@ fleetartifactdir:
|
||||
- user: 947
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
- recurse:
|
||||
- user
|
||||
- group
|
||||
|
||||
@@ -9,6 +9,9 @@
|
||||
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
|
||||
{% set node_data = salt['pillar.get']('node_data') %}
|
||||
|
||||
include:
|
||||
- elasticfleet.artifact_registry
|
||||
|
||||
# Add EA Group
|
||||
elasticfleetgroup:
|
||||
group.present:
|
||||
@@ -166,7 +169,7 @@ eaoptionalintegrationsdir:
|
||||
|
||||
{% for minion in node_data %}
|
||||
{% set role = node_data[minion]["role"] %}
|
||||
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
|
||||
{% if role in [ "eval","fleet","heavynode","import","manager", "managerhype", "managersearch","standalone" ] %}
|
||||
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
|
||||
{% set integration_keys = optional_integrations.keys() %}
|
||||
fleet_server_integrations_{{ minion }}:
|
||||
|
||||
@@ -11,6 +11,7 @@ elasticfleet:
|
||||
defend_filters:
|
||||
enable_auto_configuration: False
|
||||
subscription_integrations: False
|
||||
auto_upgrade_integrations: False
|
||||
logging:
|
||||
zeek:
|
||||
excluded:
|
||||
@@ -37,6 +38,7 @@ elasticfleet:
|
||||
- elasticsearch
|
||||
- endpoint
|
||||
- fleet_server
|
||||
- filestream
|
||||
- http_endpoint
|
||||
- httpjson
|
||||
- log
|
||||
|
||||
@@ -67,6 +67,8 @@ so-elastic-fleet-auto-configure-artifact-urls:
|
||||
elasticagent_syncartifacts:
|
||||
file.recurse:
|
||||
- name: /nsm/elastic-fleet/artifacts/beats
|
||||
- user: 947
|
||||
- group: 947
|
||||
- source: salt://beats
|
||||
{% endif %}
|
||||
|
||||
|
||||
@@ -0,0 +1,46 @@
|
||||
{%- set identities = salt['sqlite3.fetch']('/nsm/kratos/db/db.sqlite', 'SELECT id, json_extract(traits, "$.email") as email FROM identities;') -%}
|
||||
{%- set valid_identities = false -%}
|
||||
{%- if identities -%}
|
||||
{%- set valid_identities = true -%}
|
||||
{%- for id, email in identities -%}
|
||||
{%- if not id or not email -%}
|
||||
{%- set valid_identities = false -%}
|
||||
{%- break -%}
|
||||
{%- endif -%}
|
||||
{%- endfor -%}
|
||||
{%- endif -%}
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"version": ""
|
||||
},
|
||||
"name": "kratos-logs",
|
||||
"namespace": "so",
|
||||
"description": "Kratos logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/kratos/kratos.log"
|
||||
],
|
||||
"data_stream.dataset": "kratos",
|
||||
"tags": ["so-kratos"],
|
||||
{%- if valid_identities -%}
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
|
||||
{%- else -%}
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
|
||||
{%- endif -%}
|
||||
"custom": "pipeline: kratos"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"force": true
|
||||
}
|
||||
|
||||
@@ -1,32 +1,33 @@
|
||||
{
|
||||
"name": "elastic-defend-endpoints",
|
||||
"namespace": "default",
|
||||
"description": "",
|
||||
"package": {
|
||||
"name": "endpoint",
|
||||
"title": "Elastic Defend",
|
||||
"version": "8.17.0",
|
||||
"requires_root": true
|
||||
},
|
||||
"enabled": true,
|
||||
"policy_id": "endpoints-initial",
|
||||
"vars": {},
|
||||
"inputs": [
|
||||
{
|
||||
"type": "endpoint",
|
||||
"enabled": true,
|
||||
"config": {
|
||||
"integration_config": {
|
||||
"value": {
|
||||
"type": "endpoint",
|
||||
"endpointConfig": {
|
||||
"preset": "DataCollection"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"streams": []
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
"name": "elastic-defend-endpoints",
|
||||
"namespace": "default",
|
||||
"description": "",
|
||||
"package": {
|
||||
"name": "endpoint",
|
||||
"title": "Elastic Defend",
|
||||
"version": "8.18.1",
|
||||
"requires_root": true
|
||||
},
|
||||
"enabled": true,
|
||||
"policy_ids": [
|
||||
"endpoints-initial"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": [
|
||||
{
|
||||
"type": "ENDPOINT_INTEGRATION_CONFIG",
|
||||
"enabled": true,
|
||||
"config": {
|
||||
"_config": {
|
||||
"value": {
|
||||
"type": "endpoint",
|
||||
"endpointConfig": {
|
||||
"preset": "DataCollection"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"streams": []
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -0,0 +1,48 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "filestream",
|
||||
"version": ""
|
||||
},
|
||||
"name": "agent-monitor",
|
||||
"namespace": "",
|
||||
"description": "",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"output_id": null,
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/agents/agent-monitor.log"
|
||||
],
|
||||
"data_stream.dataset": "agentmonitor",
|
||||
"pipeline": "elasticagent.monitor",
|
||||
"parsers": "",
|
||||
"exclude_files": [
|
||||
"\\.gz$"
|
||||
],
|
||||
"include_files": [],
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: gridmetrics",
|
||||
"tags": [],
|
||||
"recursive_glob": true,
|
||||
"ignore_older": "72h",
|
||||
"clean_inactive": -1,
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": true,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": 64,
|
||||
"file_identity_native": false,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -19,7 +19,7 @@
|
||||
],
|
||||
"data_stream.dataset": "idh",
|
||||
"tags": [],
|
||||
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
|
||||
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
|
||||
"custom": "pipeline: common"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
],
|
||||
"data_stream.dataset": "import",
|
||||
"custom": "",
|
||||
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.67.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-2.5.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.67.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.67.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-2.5.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
|
||||
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.5.4\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.5.4\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.5.4\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
|
||||
"tags": [
|
||||
"import"
|
||||
]
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
{
|
||||
"package": {
|
||||
"name": "log",
|
||||
"version": ""
|
||||
},
|
||||
"name": "kratos-logs",
|
||||
"namespace": "so",
|
||||
"description": "Kratos logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"inputs": {
|
||||
"logs-logfile": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"log.logs": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/kratos/kratos.log"
|
||||
],
|
||||
"data_stream.dataset": "kratos",
|
||||
"tags": ["so-kratos"],
|
||||
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
|
||||
"custom": "pipeline: kratos"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"force": true
|
||||
}
|
||||
@@ -11,7 +11,7 @@
|
||||
"tcp-tcp": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"tcp.generic": {
|
||||
"tcp.tcp": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"listen_address": "0.0.0.0",
|
||||
@@ -23,7 +23,8 @@
|
||||
"syslog"
|
||||
],
|
||||
"syslog_options": "field: message\n#format: auto\n#timezone: Local",
|
||||
"ssl": ""
|
||||
"ssl": "",
|
||||
"custom": ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -31,7 +31,8 @@
|
||||
],
|
||||
"tags": [
|
||||
"so-grid-node"
|
||||
]
|
||||
],
|
||||
"processors": "- if:\n contains:\n message: \"salt-minion\"\n then: \n - dissect:\n tokenizer: \"%{} %{} %{} %{} %{} %{}: [%{log.level}] %{*}\"\n field: \"message\"\n trim_values: \"all\"\n target_prefix: \"\"\n - drop_event:\n when:\n equals:\n log.level: \"INFO\""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
|
||||
|
||||
{% import_json '/opt/so/state/esfleet_package_components.json' as ADDON_PACKAGE_COMPONENTS %}
|
||||
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
|
||||
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
|
||||
|
||||
{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
|
||||
@@ -14,6 +15,7 @@
|
||||
'awsfirehose.logs': 'awsfirehose',
|
||||
'awsfirehose.metrics': 'aws.cloudwatch',
|
||||
'cribl.logs': 'cribl',
|
||||
'cribl.metrics': 'cribl',
|
||||
'sentinel_one_cloud_funnel.logins': 'sentinel_one_cloud_funnel.login',
|
||||
'azure_application_insights.app_insights': 'azure.app_insights',
|
||||
'azure_application_insights.app_state': 'azure.app_state',
|
||||
@@ -45,7 +47,10 @@
|
||||
'synthetics.browser_screenshot': 'synthetics-browser.screenshot',
|
||||
'synthetics.http': 'synthetics-http',
|
||||
'synthetics.icmp': 'synthetics-icmp',
|
||||
'synthetics.tcp': 'synthetics-tcp'
|
||||
'synthetics.tcp': 'synthetics-tcp',
|
||||
'swimlane.swimlane_api': 'swimlane.api',
|
||||
'swimlane.tenant_api': 'swimlane.tenant',
|
||||
'swimlane.turbine_api': 'turbine.api'
|
||||
} %}
|
||||
|
||||
{% for pkg in ADDON_PACKAGE_COMPONENTS %}
|
||||
@@ -62,70 +67,90 @@
|
||||
{% else %}
|
||||
{% set integration_type = "" %}
|
||||
{% endif %}
|
||||
{% set component_name = pkg.name ~ "." ~ pattern.title %}
|
||||
{# fix weirdly named components #}
|
||||
{% if component_name in WEIRD_INTEGRATIONS %}
|
||||
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
|
||||
{% endif %}
|
||||
{% set component_name = pkg.name ~ "." ~ pattern.title %}
|
||||
{% set index_pattern = pattern.name %}
|
||||
|
||||
{# fix weirdly named components #}
|
||||
{% if component_name in WEIRD_INTEGRATIONS %}
|
||||
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
|
||||
{% endif %}
|
||||
|
||||
{# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed
|
||||
eg. having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #}
|
||||
{% set custom_component_name = component_name %}
|
||||
|
||||
{# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #}
|
||||
{% set generic_integration_type = integration_type %}
|
||||
|
||||
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
|
||||
{% set component_name_x = component_name.replace(".","_x_") %}
|
||||
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
|
||||
{% set integration_key = "so-" ~ integration_type ~ component_name_x %}
|
||||
|
||||
{# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #}
|
||||
{% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %}
|
||||
{# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #}
|
||||
{% set index_pattern = integration_type ~ component_name ~ "-*" %}
|
||||
{# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #}
|
||||
{% set component_name = "filestream.generic" %}
|
||||
{% set generic_integration_type = "logs-" %}
|
||||
{% endif %}
|
||||
|
||||
{# Default integration settings #}
|
||||
{% set integration_defaults = {
|
||||
"index_sorting": false,
|
||||
"index_template": {
|
||||
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
|
||||
"data_stream": {
|
||||
"allow_custom_routing": false,
|
||||
"hidden": false
|
||||
},
|
||||
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
|
||||
"index_patterns": [pattern.name],
|
||||
"priority": 501,
|
||||
"template": {
|
||||
"settings": {
|
||||
"index": {
|
||||
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
|
||||
"number_of_replicas": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"policy": {
|
||||
"phases": {
|
||||
"cold": {
|
||||
"actions": {
|
||||
"set_priority": {"priority": 0}
|
||||
},
|
||||
"min_age": "60d"
|
||||
"index_sorting": false,
|
||||
"index_template": {
|
||||
"composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
|
||||
"data_stream": {
|
||||
"allow_custom_routing": false,
|
||||
"hidden": false
|
||||
},
|
||||
"ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"],
|
||||
"index_patterns": [index_pattern],
|
||||
"priority": 501,
|
||||
"template": {
|
||||
"settings": {
|
||||
"index": {
|
||||
"lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"},
|
||||
"number_of_replicas": 0
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"policy": {
|
||||
"phases": {
|
||||
"cold": {
|
||||
"actions": {
|
||||
"set_priority": {"priority": 0}
|
||||
},
|
||||
"min_age": "60d"
|
||||
},
|
||||
"delete": {
|
||||
"actions": {
|
||||
"delete": {}
|
||||
},
|
||||
"min_age": "365d"
|
||||
},
|
||||
"hot": {
|
||||
"actions": {
|
||||
"rollover": {
|
||||
"max_age": "30d",
|
||||
"max_primary_shard_size": "50gb"
|
||||
},
|
||||
"set_priority": {"priority": 100}
|
||||
},
|
||||
"delete": {
|
||||
"actions": {
|
||||
"delete": {}
|
||||
},
|
||||
"min_age": "365d"
|
||||
},
|
||||
"hot": {
|
||||
"actions": {
|
||||
"rollover": {
|
||||
"max_age": "30d",
|
||||
"max_primary_shard_size": "50gb"
|
||||
},
|
||||
"set_priority": {"priority": 100}
|
||||
},
|
||||
"min_age": "0ms"
|
||||
},
|
||||
"warm": {
|
||||
"actions": {
|
||||
"set_priority": {"priority": 50}
|
||||
},
|
||||
"min_age": "30d"
|
||||
}
|
||||
}
|
||||
}
|
||||
} %}
|
||||
"min_age": "0ms"
|
||||
},
|
||||
"warm": {
|
||||
"actions": {
|
||||
"set_priority": {"priority": 50}
|
||||
},
|
||||
"min_age": "30d"
|
||||
}
|
||||
}
|
||||
}
|
||||
} %}
|
||||
|
||||
{% do ADDON_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
@@ -45,6 +45,11 @@ elasticfleet:
|
||||
global: True
|
||||
forcedType: bool
|
||||
helpLink: elastic-fleet.html
|
||||
auto_upgrade_integrations:
|
||||
description: Enables or disables automatically upgrading Elastic Agent integrations.
|
||||
global: True
|
||||
forcedType: bool
|
||||
helpLink: elastic-fleet.html
|
||||
server:
|
||||
custom_fqdn:
|
||||
description: Custom FQDN for Agents to connect to. One per line.
|
||||
|
||||
@@ -88,7 +88,13 @@ elastic_fleet_package_version_check() {
|
||||
|
||||
elastic_fleet_package_latest_version_check() {
|
||||
PACKAGE=$1
|
||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.latestVersion'
|
||||
if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" --fail); then
|
||||
if version=$(jq -e -r '.item.latestVersion' <<< $output); then
|
||||
echo "$version"
|
||||
fi
|
||||
else
|
||||
echo "Error: Failed to get latest version for $PACKAGE"
|
||||
fi
|
||||
}
|
||||
|
||||
elastic_fleet_package_install() {
|
||||
@@ -108,7 +114,7 @@ elastic_fleet_package_is_installed() {
|
||||
}
|
||||
|
||||
elastic_fleet_installed_packages() {
|
||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=300"
|
||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=500"
|
||||
}
|
||||
|
||||
elastic_fleet_agent_policy_ids() {
|
||||
@@ -149,9 +155,13 @@ elastic_fleet_integration_policy_package_name() {
|
||||
elastic_fleet_integration_policy_package_version() {
|
||||
AGENT_POLICY=$1
|
||||
INTEGRATION=$2
|
||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version'
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to retrieve package version for '$INTEGRATION' in '$AGENT_POLICY'."
|
||||
|
||||
if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" --fail); then
|
||||
if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< $output); then
|
||||
echo "$version"
|
||||
fi
|
||||
else
|
||||
echo "Error: Failed to retrieve agent policy $AGENT_POLICY"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -1,62 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
. /usr/sbin/so-elastic-fleet-common
|
||||
|
||||
curl_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/)
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to connect to Kibana."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
IFS=$'\n'
|
||||
agent_policies=$(elastic_fleet_agent_policy_ids)
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to retrieve agent policies."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
for AGENT_POLICY in $agent_policies; do
|
||||
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
|
||||
for INTEGRATION in $integrations; do
|
||||
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
|
||||
# Get package name so we know what package to look for when checking the current and latest available version
|
||||
PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
|
||||
|
||||
# Get currently installed version of package
|
||||
PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")
|
||||
|
||||
# Get latest available version of package
|
||||
AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")
|
||||
|
||||
# Get integration ID
|
||||
INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
|
||||
|
||||
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
|
||||
# Dry run of the upgrade
|
||||
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
|
||||
echo "Upgrading $INTEGRATION..."
|
||||
echo "Starting dry run..."
|
||||
DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
|
||||
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
|
||||
|
||||
# If no errors with dry run, proceed with actual upgrade
|
||||
if [[ "$DRYRUN_ERRORS" == "false" ]]; then
|
||||
echo "No errors detected. Proceeding with upgrade..."
|
||||
elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Upgrade failed for integration ID '$INTEGRATION_ID'."
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "Errors detected during dry run. Stopping upgrade..."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
done
|
||||
done
|
||||
echo
|
||||
@@ -83,5 +83,10 @@ docker run \
|
||||
{{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-elastic-agent-builder:{{ GLOBALS.so_version }} wixl -o so-elastic-agent_windows_amd64_msi --arch x64 /workspace/so-elastic-agent.wxs
|
||||
printf "\n### MSI Generated...\n"
|
||||
|
||||
printf "\n### Cleaning up temp files in /nsm/elastic-agent-workspace\n"
|
||||
printf "\n### Cleaning up temp files \n"
|
||||
rm -rf /nsm/elastic-agent-workspace
|
||||
rm -rf /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/so-elastic-agent_windows_amd64.exe
|
||||
|
||||
printf "\n### Copying so_agent-installers to /nsm/elastic-fleet/ for nginx.\n"
|
||||
\cp -vr /opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers/ /nsm/elastic-fleet/
|
||||
chmod 644 /nsm/elastic-fleet/so_agent-installers/*
|
||||
|
||||
@@ -14,7 +14,7 @@ if ! is_manager_node; then
|
||||
fi
|
||||
|
||||
# Get current list of Grid Node Agents that need to be upgraded
|
||||
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=policy_id%20%3A%20so-grid-nodes_%2A&showInactive=false&showUpgradeable=true&getStatusSummary=true")
|
||||
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")
|
||||
|
||||
# Check to make sure that the server responded with good data - else, bail from script
|
||||
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
|
||||
|
||||
@@ -0,0 +1,81 @@
|
||||
#!/bin/bash
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
|
||||
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
|
||||
{%- set AUTO_UPGRADE_INTEGRATIONS = salt['pillar.get']('elasticfleet:config:auto_upgrade_integrations', default=false) %}
|
||||
|
||||
. /usr/sbin/so-elastic-fleet-common
|
||||
|
||||
curl_output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/)
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to connect to Kibana."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
IFS=$'\n'
|
||||
agent_policies=$(elastic_fleet_agent_policy_ids)
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to retrieve agent policies."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
|
||||
|
||||
for AGENT_POLICY in $agent_policies; do
|
||||
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
|
||||
for INTEGRATION in $integrations; do
|
||||
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
|
||||
# Get package name so we know what package to look for when checking the current and latest available version
|
||||
PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
|
||||
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
||||
if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
|
||||
{%- endif %}
|
||||
# Get currently installed version of package
|
||||
attempt=0
|
||||
max_attempts=3
|
||||
while [ $attempt -lt $max_attempts ]; do
|
||||
if PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION") && AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME"); then
|
||||
break
|
||||
fi
|
||||
attempt=$((attempt + 1))
|
||||
done
|
||||
if [ $attempt -eq $max_attempts ]; then
|
||||
echo "Error: Failed getting $PACKAGE_VERSION or $AVAILABLE_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Get integration ID
|
||||
INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
|
||||
|
||||
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
|
||||
# Dry run of the upgrade
|
||||
echo ""
|
||||
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
|
||||
echo "Upgrading $INTEGRATION..."
|
||||
echo "Starting dry run..."
|
||||
DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
|
||||
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
|
||||
|
||||
# If no errors with dry run, proceed with actual upgrade
|
||||
if [[ "$DRYRUN_ERRORS" == "false" ]]; then
|
||||
echo "No errors detected. Proceeding with upgrade..."
|
||||
elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
||||
fi
|
||||
{%- endif %}
|
||||
fi
|
||||
done
|
||||
done
|
||||
echo
|
||||
@@ -3,7 +3,10 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||
# this file except in compliance with the Elastic License 2.0.
|
||||
{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
|
||||
{% set SUB = salt['pillar.get']('elasticfleet:config:subscription_integrations', default=false) %}
|
||||
{% set AUTO_UPGRADE_INTEGRATIONS = salt['pillar.get']('elasticfleet:config:auto_upgrade_integrations', default=false) %}
|
||||
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
|
||||
|
||||
. /usr/sbin/so-common
|
||||
. /usr/sbin/so-elastic-fleet-common
|
||||
@@ -16,6 +19,7 @@ BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
|
||||
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
|
||||
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
|
||||
PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
|
||||
COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json
|
||||
|
||||
PENDING_UPDATE=false
|
||||
|
||||
@@ -46,6 +50,28 @@ compare_versions() {
|
||||
fi
|
||||
}
|
||||
|
||||
IFS=$'\n'
|
||||
agent_policies=$(elastic_fleet_agent_policy_ids)
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Error: Failed to retrieve agent policies."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
|
||||
|
||||
in_use_integrations=()
|
||||
|
||||
for AGENT_POLICY in $agent_policies; do
|
||||
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
|
||||
for INTEGRATION in $integrations; do
|
||||
PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
|
||||
# non-default integrations that are in-use in any policy
|
||||
if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
|
||||
in_use_integrations+=("$PACKAGE_NAME")
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
if [[ -f $STATE_FILE_SUCCESS ]]; then
|
||||
if retry 3 1 "curl -s -K /opt/so/conf/elasticsearch/curl.config --output /dev/null --silent --head --fail localhost:5601/api/fleet/epm/packages"; then
|
||||
# Package_list contains all integrations beta / non-beta.
|
||||
@@ -77,10 +103,19 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
|
||||
else
|
||||
results=$(compare_versions "$latest_version" "$installed_version")
|
||||
if [ $results == "greater" ]; then
|
||||
echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
|
||||
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
|
||||
{#- When auto_upgrade_integrations is false, skip upgrading in_use_integrations #}
|
||||
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
||||
if ! [[ " ${in_use_integrations[@]} " =~ " $package_name " ]]; then
|
||||
{%- endif %}
|
||||
echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
|
||||
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
|
||||
|
||||
PENDING_UPDATE=true
|
||||
PENDING_UPDATE=true
|
||||
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
||||
else
|
||||
echo "skipping available upgrade for in use integration - $package_name."
|
||||
fi
|
||||
{%- endif %}
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
@@ -92,9 +127,18 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
|
||||
else
|
||||
results=$(compare_versions "$latest_version" "$installed_version")
|
||||
if [ $results == "greater" ]; then
|
||||
echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
|
||||
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
|
||||
PENDING_UPDATE=true
|
||||
{#- When auto_upgrade_integrations is false, skip upgrading in_use_integrations #}
|
||||
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
||||
if ! [[ " ${in_use_integrations[@]} " =~ " $package_name " ]]; then
|
||||
{%- endif %}
|
||||
echo "$package_name is at version $installed_version latest version is $latest_version... Adding to next update."
|
||||
jq --argjson package "$bulk_package" '.packages += [$package]' $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_PACKAGE_TMP && mv $BULK_INSTALL_PACKAGE_TMP $BULK_INSTALL_PACKAGE_LIST
|
||||
PENDING_UPDATE=true
|
||||
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
||||
else
|
||||
echo "skipping available upgrade for in use integration - $package_name."
|
||||
fi
|
||||
{%- endif %}
|
||||
fi
|
||||
fi
|
||||
{% endif %}
|
||||
@@ -104,14 +148,33 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
|
||||
done <<< "$(jq -c '.packages[]' "$INSTALLED_PACKAGE_LIST")"
|
||||
|
||||
if [ "$PENDING_UPDATE" = true ]; then
|
||||
# Run bulk install of packages
|
||||
elastic_fleet_bulk_package_install $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_OUTPUT
|
||||
# Run chunked install of packages
|
||||
echo "" > $BULK_INSTALL_OUTPUT
|
||||
pkg_group=1
|
||||
pkg_filename="${BULK_INSTALL_PACKAGE_LIST%.json}"
|
||||
|
||||
jq -c '.packages | _nwise(25)' $BULK_INSTALL_PACKAGE_LIST | while read -r line; do
|
||||
echo "$line" | jq '{ "packages": . }' > "${pkg_filename}_${pkg_group}.json"
|
||||
pkg_group=$((pkg_group + 1))
|
||||
done
|
||||
|
||||
for file in "${pkg_filename}_"*.json; do
|
||||
[ -e "$file" ] || continue
|
||||
elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT
|
||||
done
|
||||
# cleanup any temp files for chunked package install
|
||||
rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
|
||||
else
|
||||
echo "Elastic integrations don't appear to need installation/updating..."
|
||||
fi
|
||||
# Write out file for generating index/component/ilm templates
|
||||
latest_installed_package_list=$(elastic_fleet_installed_packages)
|
||||
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
|
||||
if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
|
||||
# Refresh installed component template list
|
||||
latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
|
||||
echo $latest_component_templates_list > $COMPONENT_TEMPLATES
|
||||
fi
|
||||
|
||||
else
|
||||
# This is the installation of add-on integrations and upgrade of existing integrations. Exiting without error, next highstate will attempt to re-run.
|
||||
|
||||
@@ -23,14 +23,28 @@ function update_logstash_outputs() {
|
||||
}
|
||||
function update_kafka_outputs() {
|
||||
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
|
||||
SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')
|
||||
|
||||
JSON_STRING=$(jq -n \
|
||||
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
||||
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
|
||||
# Update Kafka outputs
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
||||
if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
|
||||
SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
|
||||
if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
|
||||
# Update policy when fleet has secrets enabled
|
||||
JSON_STRING=$(jq -n \
|
||||
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
||||
--argjson SECRETS "$SECRETS" \
|
||||
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
|
||||
else
|
||||
# Update policy when fleet has secrets disabled or policy hasn't been force updated
|
||||
JSON_STRING=$(jq -n \
|
||||
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
||||
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
|
||||
fi
|
||||
# Update Kafka outputs
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
||||
else
|
||||
printf "Failed to get current Kafka output policy..."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
{% if GLOBALS.pipeline == "KAFKA" %}
|
||||
|
||||
@@ -5,46 +5,78 @@
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
|
||||
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-managerhype'] %}
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
force=false
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-f|--force)
|
||||
force=true
|
||||
shift
|
||||
;;
|
||||
*)
|
||||
echo "Unknown option $1"
|
||||
echo "Usage: $0 [-f|--force]"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Check to make sure that Kibana API is up & ready
|
||||
RETURN_CODE=0
|
||||
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
|
||||
RETURN_CODE=$?
|
||||
|
||||
if [[ "$RETURN_CODE" != "0" ]]; then
|
||||
printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
|
||||
exit 1
|
||||
echo -e "\nKibana API not accessible, can't setup Elastic Fleet output policy for Kafka...\n"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
|
||||
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
|
||||
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
|
||||
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
|
||||
KAFKA_OUTPUT_VERSION="2.6.0"
|
||||
|
||||
if ! echo "$output" | grep -q "so-manager_kafka"; then
|
||||
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
|
||||
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
|
||||
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
|
||||
KAFKA_OUTPUT_VERSION="2.6.0"
|
||||
if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
|
||||
# Create a new output policy for Kafka. Default is disabled 'is_default: false & is_default_monitoring: false'
|
||||
JSON_STRING=$( jq -n \
|
||||
--arg KAFKACRT "$KAFKACRT" \
|
||||
--arg KAFKAKEY "$KAFKAKEY" \
|
||||
--arg KAFKACA "$KAFKACA" \
|
||||
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
|
||||
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
|
||||
'{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-securityonion","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
|
||||
)
|
||||
curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
|
||||
refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
|
||||
|
||||
if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
|
||||
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
|
||||
--arg KAFKACRT "$KAFKACRT" \
|
||||
--arg KAFKAKEY "$KAFKAKEY" \
|
||||
--arg KAFKACA "$KAFKACA" \
|
||||
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
|
||||
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
|
||||
'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
|
||||
)
|
||||
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
|
||||
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
|
||||
exit 1
|
||||
else
|
||||
echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
|
||||
exit 0
|
||||
fi
|
||||
elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null) && [[ "$force" == "true" ]]; then
|
||||
# force an update to Kafka policy. Keep the current value of Kafka output policy (enabled/disabled).
|
||||
ENABLED_DISABLED=$(echo "$kafka_output" | jq -e .item.is_default)
|
||||
HOSTS=$(echo "$kafka_output" | jq -r '.item.hosts')
|
||||
JSON_STRING=$( jq -n \
|
||||
--arg KAFKACRT "$KAFKACRT" \
|
||||
--arg KAFKAKEY "$KAFKAKEY" \
|
||||
--arg KAFKACA "$KAFKACA" \
|
||||
--arg ENABLED_DISABLED "$ENABLED_DISABLED"\
|
||||
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
|
||||
--argjson HOSTS "$HOSTS" \
|
||||
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
|
||||
)
|
||||
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
|
||||
echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
|
||||
exit 1
|
||||
elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
|
||||
echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
|
||||
else
|
||||
echo -e "\nForced update to Elastic Fleet output policy for Kafka...\n"
|
||||
fi
|
||||
|
||||
elif echo "$output" | grep -q "so-manager_kafka"; then
|
||||
else
|
||||
echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
|
||||
fi
|
||||
{% else %}
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
elastic_auth_pillar:
|
||||
file.managed:
|
||||
- name: /opt/so/saltstack/local/pillar/elasticsearch/auth.sls
|
||||
- mode: 600
|
||||
- mode: 640
|
||||
- reload_pillar: True
|
||||
- contents: |
|
||||
elasticsearch:
|
||||
|
||||
@@ -28,7 +28,7 @@
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
|
||||
{% if grains.id.split('_') | last in ['manager','managerhype','managersearch','standalone'] %}
|
||||
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
|
||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
|
||||
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
elasticsearch:
|
||||
enabled: false
|
||||
version: 8.17.3
|
||||
version: 8.18.6
|
||||
index_clean: true
|
||||
config:
|
||||
action:
|
||||
@@ -162,6 +162,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -274,7 +275,87 @@ elasticsearch:
|
||||
number_of_replicas: 0
|
||||
auto_expand_replicas: 0-2
|
||||
number_of_shards: 1
|
||||
refresh_interval: 30s
|
||||
refresh_interval: 1s
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
actions: {}
|
||||
min_age: 0ms
|
||||
so-assistant-chat:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
- assistant-chat-mappings
|
||||
- assistant-chat-settings
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- so-assistant-chat*
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
dynamic_templates:
|
||||
- strings_as_keyword:
|
||||
mapping:
|
||||
ignore_above: 1024
|
||||
type: keyword
|
||||
match_mapping_type: string
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-assistant-chat-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 1500
|
||||
number_of_replicas: 0
|
||||
number_of_shards: 1
|
||||
refresh_interval: 1s
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
actions: {}
|
||||
min_age: 0ms
|
||||
so-assistant-session:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
- assistant-session-mappings
|
||||
- assistant-session-settings
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- so-assistant-session*
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
dynamic_templates:
|
||||
- strings_as_keyword:
|
||||
mapping:
|
||||
ignore_above: 1024
|
||||
type: keyword
|
||||
match_mapping_type: string
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-assistant-session-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 1500
|
||||
number_of_replicas: 0
|
||||
number_of_shards: 1
|
||||
refresh_interval: 1s
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
@@ -316,6 +397,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -427,6 +509,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -534,6 +617,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -563,6 +647,7 @@ elasticsearch:
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- winlog-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
@@ -697,6 +782,7 @@ elasticsearch:
|
||||
- client-mappings
|
||||
- device-mappings
|
||||
- network-mappings
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
@@ -768,6 +854,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -878,6 +965,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -998,6 +1086,7 @@ elasticsearch:
|
||||
index_template:
|
||||
composed_of:
|
||||
- so-data-streams-mappings
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
- so-logs-mappings
|
||||
@@ -1234,6 +1323,68 @@ elasticsearch:
|
||||
set_priority:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
so-elastic-agent-monitor:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
- event-mappings
|
||||
- so-elastic-agent-monitor
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
index_patterns:
|
||||
- logs-agentmonitor-*
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
_meta:
|
||||
managed: true
|
||||
managed_by: security_onion
|
||||
package:
|
||||
name: elastic_agent
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-elastic-agent-monitor-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 5000
|
||||
number_of_replicas: 0
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
_meta:
|
||||
managed: true
|
||||
managed_by: security_onion
|
||||
package:
|
||||
name: elastic_agent
|
||||
phases:
|
||||
cold:
|
||||
actions:
|
||||
set_priority:
|
||||
priority: 0
|
||||
min_age: 60d
|
||||
delete:
|
||||
actions:
|
||||
delete: {}
|
||||
min_age: 365d
|
||||
hot:
|
||||
actions:
|
||||
rollover:
|
||||
max_age: 30d
|
||||
max_primary_shard_size: 50gb
|
||||
set_priority:
|
||||
priority: 100
|
||||
min_age: 0ms
|
||||
warm:
|
||||
actions:
|
||||
set_priority:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
so-logs-elastic_agent_x_apm_server:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
@@ -2832,6 +2983,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -3062,6 +3214,7 @@ elasticsearch:
|
||||
- event-mappings
|
||||
- logs-system.syslog@package
|
||||
- logs-system.syslog@custom
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
- so-system-mappings
|
||||
@@ -3421,6 +3574,7 @@ elasticsearch:
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- logstash-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -3505,6 +3659,7 @@ elasticsearch:
|
||||
composed_of:
|
||||
- metrics-endpoint.metadata@package
|
||||
- metrics-endpoint.metadata@custom
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
@@ -3551,6 +3706,7 @@ elasticsearch:
|
||||
composed_of:
|
||||
- metrics-endpoint.metrics@package
|
||||
- metrics-endpoint.metrics@custom
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
@@ -3597,6 +3753,7 @@ elasticsearch:
|
||||
composed_of:
|
||||
- metrics-endpoint.policy@package
|
||||
- metrics-endpoint.policy@custom
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
@@ -3645,6 +3802,7 @@ elasticsearch:
|
||||
- metrics-fleet_server.agent_status@package
|
||||
- metrics-fleet_server.agent_status@custom
|
||||
- ecs@mappings
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
@@ -3668,6 +3826,7 @@ elasticsearch:
|
||||
- metrics-fleet_server.agent_versions@package
|
||||
- metrics-fleet_server.agent_versions@custom
|
||||
- ecs@mappings
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
@@ -3715,6 +3874,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -3827,6 +3987,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -3856,6 +4017,7 @@ elasticsearch:
|
||||
- vulnerability-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
@@ -3939,6 +4101,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -3968,6 +4131,7 @@ elasticsearch:
|
||||
- vulnerability-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
@@ -4009,7 +4173,7 @@ elasticsearch:
|
||||
hot:
|
||||
actions:
|
||||
rollover:
|
||||
max_age: 1d
|
||||
max_age: 30d
|
||||
max_primary_shard_size: 50gb
|
||||
set_priority:
|
||||
priority: 100
|
||||
@@ -4051,6 +4215,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -4080,6 +4245,7 @@ elasticsearch:
|
||||
- vulnerability-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
@@ -4163,6 +4329,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -4276,6 +4443,7 @@ elasticsearch:
|
||||
- http-mappings
|
||||
- dtc-http-mappings
|
||||
- log-mappings
|
||||
- metadata-mappings
|
||||
- network-mappings
|
||||
- dtc-network-mappings
|
||||
- observer-mappings
|
||||
@@ -4307,6 +4475,7 @@ elasticsearch:
|
||||
- zeek-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
@@ -4479,6 +4648,14 @@ elasticsearch:
|
||||
- data
|
||||
- remote_cluster_client
|
||||
- transform
|
||||
so-managerhype:
|
||||
config:
|
||||
node:
|
||||
roles:
|
||||
- master
|
||||
- data
|
||||
- remote_cluster_client
|
||||
- transform
|
||||
so-managersearch:
|
||||
config:
|
||||
node:
|
||||
|
||||
@@ -204,12 +204,17 @@ so-elasticsearch-roles-load:
|
||||
- docker_container: so-elasticsearch
|
||||
- file: elasticsearch_sbin_jinja
|
||||
|
||||
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
|
||||
{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
|
||||
{% set ap = "absent" %}
|
||||
{% endif %}
|
||||
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
|
||||
{% if ELASTICSEARCHMERGED.index_clean %}
|
||||
{% set ap = "present" %}
|
||||
{% else %}
|
||||
{% set ap = "absent" %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
|
||||
so-elasticsearch-indices-delete:
|
||||
cron.{{ap}}:
|
||||
- name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
|
||||
|
||||
@@ -26,7 +26,7 @@
|
||||
{
|
||||
"geoip": {
|
||||
"field": "destination.ip",
|
||||
"target_field": "destination_geo",
|
||||
"target_field": "destination.as",
|
||||
"database_file": "GeoLite2-ASN.mmdb",
|
||||
"ignore_missing": true,
|
||||
"ignore_failure": true,
|
||||
@@ -36,13 +36,17 @@
|
||||
{
|
||||
"geoip": {
|
||||
"field": "source.ip",
|
||||
"target_field": "source_geo",
|
||||
"target_field": "source.as",
|
||||
"database_file": "GeoLite2-ASN.mmdb",
|
||||
"ignore_missing": true,
|
||||
"ignore_failure": true,
|
||||
"properties": ["ip", "asn", "organization_name", "network"]
|
||||
}
|
||||
},
|
||||
{ "rename": { "field": "destination.as.organization_name", "target_field": "destination.as.organization.name", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "rename": { "field": "source.as.organization_name", "target_field": "source.as.organization.name", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "rename": { "field": "destination.as.asn", "target_field": "destination.as.number", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "rename": { "field": "source.as.asn", "target_field": "source.as.number", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "set": { "if": "ctx.event?.severity == 1", "field": "event.severity_label", "value": "low", "override": true } },
|
||||
{ "set": { "if": "ctx.event?.severity == 2", "field": "event.severity_label", "value": "medium", "override": true } },
|
||||
{ "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } },
|
||||
|
||||
22
salt/elasticsearch/files/ingest/common.ip_validation
Normal file
22
salt/elasticsearch/files/ingest/common.ip_validation
Normal file
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"processors": [
|
||||
{
|
||||
"convert": {
|
||||
"field": "_ingest._value",
|
||||
"type": "ip",
|
||||
"target_field": "_ingest._temp_ip",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "temp._valid_ips",
|
||||
"allow_duplicates": false,
|
||||
"value": [
|
||||
"{{{_ingest._temp_ip}}}"
|
||||
],
|
||||
"ignore_failure": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
36
salt/elasticsearch/files/ingest/elasticagent.monitor
Normal file
36
salt/elasticsearch/files/ingest/elasticagent.monitor
Normal file
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"processors": [
|
||||
{
|
||||
"set": {
|
||||
"field": "event.dataset",
|
||||
"value": "gridmetrics.agents",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "event.module",
|
||||
"value": "gridmetrics",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": [
|
||||
"host",
|
||||
"elastic_agent",
|
||||
"agent"
|
||||
],
|
||||
"ignore_missing": true,
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"json": {
|
||||
"field": "message",
|
||||
"add_to_root": true,
|
||||
"ignore_failure": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -8,7 +8,7 @@
|
||||
"processors": [
|
||||
{ "set": { "ignore_failure": true, "field": "event.module", "value": "elastic_agent" } },
|
||||
{ "split": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "separator": "\\.", "target_field": "module_temp" } },
|
||||
{ "split": { "if": "ctx.data_stream?.dataset.contains('.')", "field":"data_stream.dataset", "separator":"\\.", "target_field":"datastream_dataset_temp", "ignore_missing":true } },
|
||||
{ "split": { "if": "ctx.data_stream?.dataset != null && ctx.data_stream?.dataset.contains('.')", "field":"data_stream.dataset", "separator":"\\.", "target_field":"datastream_dataset_temp", "ignore_missing":true } },
|
||||
{ "set": { "if": "ctx.module_temp != null", "override": true, "field": "event.module", "value": "{{module_temp.0}}" } },
|
||||
{ "set": { "if": "ctx.datastream_dataset_temp != null && ctx.datastream_dataset_temp[0] == 'network_traffic'", "field":"event.module", "value":"{{ datastream_dataset_temp.0 }}", "ignore_failure":true, "ignore_empty_value":true, "description":"Fix EA network packet capture" } },
|
||||
{ "gsub": { "if": "ctx.event?.dataset != null && ctx.event.dataset.contains('.')", "field": "event.dataset", "pattern": "^[^.]*.", "replacement": "", "target_field": "dataset_tag_temp" } },
|
||||
@@ -19,11 +19,12 @@
|
||||
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
|
||||
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.dataset", "value": "import" } },
|
||||
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.namespace", "value": "so" } },
|
||||
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
|
||||
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
|
||||
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
|
||||
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
|
||||
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
|
||||
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
|
||||
{"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
|
||||
{"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
|
||||
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -1,11 +0,0 @@
|
||||
{
|
||||
"description" : "import.wel",
|
||||
"processors" : [
|
||||
{ "set": { "field": "event.ingested", "value": "{{ @timestamp }}" } },
|
||||
{ "set" : { "field" : "@timestamp", "value" : "{{ event.created }}" } },
|
||||
{ "remove": { "field": [ "event_record_id", "event.created" , "timestamp" , "winlog.event_data.UtcTime" ], "ignore_failure": true } },
|
||||
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
|
||||
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
@@ -107,61 +107,61 @@
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-firewall",
|
||||
"name": "logs-pfsense.log-1.23.1-firewall",
|
||||
"if": "ctx.event.provider == 'filterlog'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-openvpn",
|
||||
"name": "logs-pfsense.log-1.23.1-openvpn",
|
||||
"if": "ctx.event.provider == 'openvpn'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-ipsec",
|
||||
"name": "logs-pfsense.log-1.23.1-ipsec",
|
||||
"if": "ctx.event.provider == 'charon'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-dhcp",
|
||||
"name": "logs-pfsense.log-1.23.1-dhcp",
|
||||
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-unbound",
|
||||
"name": "logs-pfsense.log-1.23.1-unbound",
|
||||
"if": "ctx.event.provider == 'unbound'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-haproxy",
|
||||
"name": "logs-pfsense.log-1.23.1-haproxy",
|
||||
"if": "ctx.event.provider == 'haproxy'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-php-fpm",
|
||||
"name": "logs-pfsense.log-1.23.1-php-fpm",
|
||||
"if": "ctx.event.provider == 'php-fpm'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-squid",
|
||||
"name": "logs-pfsense.log-1.23.1-squid",
|
||||
"if": "ctx.event.provider == 'squid'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-snort",
|
||||
"name": "logs-pfsense.log-1.23.1-snort",
|
||||
"if": "ctx.event.provider == 'snort'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.21.0-suricata",
|
||||
"name": "logs-pfsense.log-1.23.1-suricata",
|
||||
"if": "ctx.event.provider == 'suricata'"
|
||||
}
|
||||
},
|
||||
@@ -358,14 +358,6 @@
|
||||
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": "event.original",
|
||||
"if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
|
||||
"ignore_failure": true,
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "global@custom",
|
||||
@@ -9,6 +9,7 @@
|
||||
{ "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
|
||||
{ "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
|
||||
{ "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
|
||||
{ "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } },
|
||||
{ "pipeline": { "name": "common.nids" } }
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -18,6 +18,13 @@
|
||||
{ "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
|
||||
{ "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
|
||||
{ "remove":{ "field": "agent", "ignore_failure": true } },
|
||||
{"append":{"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"ignore_failure":true}},
|
||||
{
|
||||
"script": {
|
||||
"source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
|
||||
"ignore_failure": false
|
||||
}
|
||||
},
|
||||
{ "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -12,7 +12,8 @@
|
||||
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
|
||||
{ "community_id": {} },
|
||||
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
|
||||
{ "community_id": { "if": "ctx.network?.community_id == null" } },
|
||||
{ "set": { "if": "ctx.source?.ip != null", "field": "client.ip", "value": "{{source.ip}}" } },
|
||||
{ "set": { "if": "ctx.source?.port != null", "field": "client.port", "value": "{{source.port}}" } },
|
||||
{ "set": { "if": "ctx.destination?.ip != null", "field": "server.ip", "value": "{{destination.ip}}" } },
|
||||
|
||||
@@ -24,6 +24,10 @@
|
||||
{ "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.sensorname", "target_field": "observer.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ja4l", "target_field": "hash.ja4l", "ignore_missing" : true, "if": "ctx.message2?.ja4l != null && ctx.message2.ja4l.length() > 0" }},
|
||||
{ "rename": { "field": "message2.ja4ls", "target_field": "hash.ja4ls", "ignore_missing" : true, "if": "ctx.message2?.ja4ls != null && ctx.message2.ja4ls.length() > 0" }},
|
||||
{ "rename": { "field": "message2.ja4t", "target_field": "hash.ja4t", "ignore_missing" : true, "if": "ctx.message2?.ja4t != null && ctx.message2.ja4t.length() > 0" }},
|
||||
{ "rename": { "field": "message2.ja4ts", "target_field": "hash.ja4ts", "ignore_missing" : true, "if": "ctx.message2?.ja4ts != null && ctx.message2.ja4ts.length() > 0" }},
|
||||
{ "script": { "lang": "painless", "source": "ctx.network.bytes = (ctx.client.bytes + ctx.server.bytes)", "ignore_failure": true } },
|
||||
{ "set": { "if": "ctx.connection?.state == 'S0'", "field": "connection.state_description", "value": "Connection attempt seen, no reply" } },
|
||||
{ "set": { "if": "ctx.connection?.state == 'S1'", "field": "connection.state_description", "value": "Connection established, not terminated" } },
|
||||
|
||||
@@ -20,7 +20,11 @@
|
||||
{ "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
|
||||
{ "foreach": {"field": "dns.answers.name","processor": {"pipeline": {"name": "common.ip_validation"}},"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null","ignore_failure": true}},
|
||||
{ "foreach": {"field": "temp._valid_ips","processor": {"append": {"field": "dns.resolved_ip","allow_duplicates": false,"value": "{{{_ingest._value}}}","ignore_failure": true}},"ignore_failure": true}},
|
||||
{ "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }","ignore_failure": true }},
|
||||
{ "remove": {"field": ["temp"], "ignore_missing": true ,"ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
|
||||
{ "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
|
||||
@@ -28,4 +32,4 @@
|
||||
{ "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
|
||||
{ "pipeline": { "name": "zeek.common" } }
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -27,6 +27,7 @@
|
||||
{ "rename": { "field": "message2.resp_fuids", "target_field": "log.id.resp_fuids", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } },
|
||||
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
|
||||
{ "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } },
|
||||
{ "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } },
|
||||
|
||||
@@ -27,6 +27,7 @@
|
||||
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.stream_id", "target_field": "http2.stream_id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } },
|
||||
{ "remove": { "field": "message2.tags", "ignore_failure": true } },
|
||||
{ "remove": { "field": ["host"], "ignore_failure": true } },
|
||||
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
|
||||
|
||||
10
salt/elasticsearch/files/ingest/zeek.ja4ssh
Normal file
10
salt/elasticsearch/files/ingest/zeek.ja4ssh
Normal file
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"description": "zeek.ja4ssh",
|
||||
"processors": [
|
||||
{"set": {"field": "event.dataset","value": "ja4ssh"}},
|
||||
{"remove": {"field": "host","ignore_missing": true,"ignore_failure": true}},
|
||||
{"json": {"field": "message","target_field": "message2","ignore_failure": true}},
|
||||
{"rename": {"field": "message2.ja4ssh", "target_field": "hash.ja4ssh", "ignore_missing": true, "if": "ctx?.message2?.ja4ssh != null && ctx.message2.ja4ssh.length() > 0" }},
|
||||
{"pipeline": {"name": "zeek.common"}}
|
||||
]
|
||||
}
|
||||
@@ -1,9 +1,25 @@
|
||||
{
|
||||
"description":"zeek.ldap_search",
|
||||
"processors":[
|
||||
{"pipeline": {"name": "zeek.ldap", "ignore_missing_pipeline":true,"ignore_failure":true}},
|
||||
{"set": {"field": "event.dataset", "value":"ldap_search"}},
|
||||
{"remove": {"field": "tags", "ignore_missing":true}},
|
||||
{"set": {"field": "event.dataset", "value":"ldap_search"}},
|
||||
{"json": {"field": "message", "target_field": "message2", "ignore_failure": true}},
|
||||
{"rename": {"field": "message2.message_id", "target_field": "ldap.message_id", "ignore_missing": true}},
|
||||
{"rename": {"field": "message2.opcode", "target_field": "ldap.opcode", "ignore_missing": true}},
|
||||
{"rename": {"field": "message2.result", "target_field": "ldap.result", "ignore_missing": true}},
|
||||
{"rename": {"field": "message2.diagnostic_message", "target_field": "ldap.diagnostic_message", "ignore_missing": true}},
|
||||
{"rename": {"field": "message2.version", "target_field": "ldap.version", "ignore_missing": true}},
|
||||
{"rename": {"field": "message2.object", "target_field": "ldap.object", "ignore_missing": true}},
|
||||
{"rename": {"field": "message2.argument", "target_field": "ldap.argument", "ignore_missing": true}},
|
||||
{"rename": {"field": "message2.scope", "target_field": "ldap_search.scope", "ignore_missing":true}},
|
||||
{"rename": {"field": "message2.deref_aliases", "target_field": "ldap_search.deref_aliases", "ignore_missing":true}},
|
||||
{"rename": {"field": "message2.base_object", "target_field": "ldap.object", "ignore_missing":true}},
|
||||
{"rename": {"field": "message2.result_count", "target_field": "ldap_search.result_count", "ignore_missing":true}},
|
||||
{"rename": {"field": "message2.filter", "target_field": "ldap_search.filter", "ignore_missing":true}},
|
||||
{"rename": {"field": "message2.attributes", "target_field": "ldap_search.attributes", "ignore_missing":true}},
|
||||
{"script": {"source": "if (ctx.containsKey('ldap') && ctx.ldap.containsKey('diagnostic_message') && ctx.ldap.diagnostic_message != null) {\n String message = ctx.ldap.diagnostic_message;\n\n // get user and property from SASL success\n if (message.toLowerCase().contains(\"sasl(0): successful result\")) {\n Pattern pattern = /user:\\s*([^ ]+)\\s*property:\\s*([^ ]+)/i;\n Matcher matcher = pattern.matcher(message);\n if (matcher.find()) {\n ctx.ldap.user_email = matcher.group(1); // Extract user email\n ctx.ldap.property = matcher.group(2); // Extract property\n }\n }\n if (message.toLowerCase().contains(\"ldaperr:\")) {\n Pattern pattern = /comment:\\s*([^,]+)/i;\n Matcher matcher = pattern.matcher(message);\n\n if (matcher.find()) {\n ctx.ldap.comment = matcher.group(1);\n }\n }\n }","ignore_failure": true}},
|
||||
{"script": {"source": "if (ctx.containsKey('ldap') && ctx.ldap.containsKey('object') && ctx.ldap.object != null) {\n String message = ctx.ldap.object;\n\n // parse common name from ldap object\n if (message.toLowerCase().contains(\"cn=\")) {\n Pattern pattern = /cn=([^,]+)/i;\n Matcher matcher = pattern.matcher(message);\n if (matcher.find()) {\n ctx.ldap.common_name = matcher.group(1); // Extract CN\n }\n }\n // build domain from ldap object\n if (message.toLowerCase().contains(\"dc=\")) {\n Pattern dcPattern = /dc=([^,]+)/i;\n Matcher dcMatcher = dcPattern.matcher(message);\n\n StringBuilder domainBuilder = new StringBuilder();\n while (dcMatcher.find()) {\n if (domainBuilder.length() > 0 ){\n domainBuilder.append(\".\");\n }\n domainBuilder.append(dcMatcher.group(1));\n }\n if (domainBuilder.length() > 0) {\n ctx.ldap.domain = domainBuilder.toString();\n }\n }\n // create list of any organizational units from ldap object\n if (message.toLowerCase().contains(\"ou=\")) {\n Pattern ouPattern = /ou=([^,]+)/i;\n Matcher ouMatcher = ouPattern.matcher(message);\n ctx.ldap.organizational_unit = [];\n\n while (ouMatcher.find()) {\n ctx.ldap.organizational_unit.add(ouMatcher.group(1));\n }\n if(ctx.ldap.organizational_unit.isEmpty()) {\n ctx.remove(\"ldap.organizational_unit\");\n }\n }\n}\n","ignore_failure": true}},
|
||||
{"remove": {"field": "message2.tags", "ignore_failure": true}},
|
||||
{"remove": {"field": ["host"], "ignore_failure": true}},
|
||||
{"pipeline": {"name": "zeek.common"}}
|
||||
]
|
||||
}
|
||||
@@ -23,6 +23,8 @@
|
||||
{ "rename": { "field": "message2.validation_status","target_field": "ssl.validation_status", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ja3", "target_field": "hash.ja3", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ja3s", "target_field": "hash.ja3s", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ja4", "target_field": "hash.ja4", "ignore_missing": true, "if": "ctx?.message2?.ja4 != null && ctx.message2.ja4.length() > 0" } },
|
||||
{ "rename": { "field": "message2.ja4s", "target_field": "hash.ja4s", "ignore_missing": true, "if": "ctx?.message2?.ja4s != null && ctx.message2.ja4s.length() > 0" } },
|
||||
{ "foreach":
|
||||
{
|
||||
"if": "ctx?.tls?.client?.hash?.sha256 !=null",
|
||||
|
||||
@@ -42,6 +42,7 @@
|
||||
{ "dot_expander": { "field": "basic_constraints.path_length", "path": "message2", "ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.basic_constraints.path_length", "target_field": "x509.basic_constraints.path_length", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.fingerprint", "target_field": "hash.sha256", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ja4x", "target_field": "hash.ja4x", "ignore_missing": true, "if": "ctx?.message2?.ja4x != null && ctx.message2.ja4x.length() > 0" } },
|
||||
{ "pipeline": { "name": "zeek.common_ssl" } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ elasticsearch:
|
||||
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
|
||||
helpLink: elasticsearch.html
|
||||
index_clean:
|
||||
description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings.
|
||||
description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings. This setting only applies to EVAL, STANDALONE, and HEAVY NODE installations. Other installations can only use ILM settings.
|
||||
forcedType: bool
|
||||
helpLink: elasticsearch.html
|
||||
retention:
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}
|
||||
|
||||
{# start generation of integration default index_settings #}
|
||||
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') %}
|
||||
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
|
||||
{% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
|
||||
{% if check_package_components.size > 1 %}
|
||||
{% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
|
||||
|
||||
69
salt/elasticsearch/templates/component/ecs/hash.json
Normal file
69
salt/elasticsearch/templates/component/ecs/hash.json
Normal file
@@ -0,0 +1,69 @@
|
||||
{
|
||||
"template": {
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"hash": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ja3": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja3s": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"hassh": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"md5": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"sha1": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"sha256": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4l": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4ls": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4t": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4ts": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4ssh": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4h": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4x": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -29,7 +29,7 @@
|
||||
"file": {
|
||||
"properties": {
|
||||
"line": {
|
||||
"type": "integer"
|
||||
"type": "long"
|
||||
},
|
||||
"name": {
|
||||
"ignore_above": 1024,
|
||||
|
||||
26
salt/elasticsearch/templates/component/ecs/metadata.json
Normal file
26
salt/elasticsearch/templates/component/ecs/metadata.json
Normal file
@@ -0,0 +1,26 @@
|
||||
{
|
||||
"template": {
|
||||
"mappings": {
|
||||
"dynamic_templates": [],
|
||||
"properties": {
|
||||
"metadata": {
|
||||
"properties": {
|
||||
"kafka": {
|
||||
"properties": {
|
||||
"timestamp": {
|
||||
"type": "date"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"_meta": {
|
||||
"_meta": {
|
||||
"documentation": "https://www.elastic.co/guide/en/ecs/current/ecs-log.html",
|
||||
"ecs_version": "1.12.2"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,43 @@
|
||||
{
|
||||
"template": {
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"agent": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"hostname": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"id": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"last_checkin_status": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"last_checkin": {
|
||||
"type": "date"
|
||||
},
|
||||
"name": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"offline_duration_hours": {
|
||||
"type": "integer"
|
||||
},
|
||||
"policy_id": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"status": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -5,6 +5,7 @@
|
||||
"managed_by": "security_onion",
|
||||
"managed": true
|
||||
},
|
||||
"date_detection": false,
|
||||
"dynamic_templates": [
|
||||
{
|
||||
"strings_as_keyword": {
|
||||
@@ -16,7 +17,19 @@
|
||||
}
|
||||
}
|
||||
],
|
||||
"date_detection": false
|
||||
"properties": {
|
||||
"metadata": {
|
||||
"properties": {
|
||||
"kafka": {
|
||||
"properties": {
|
||||
"timestamp": {
|
||||
"type": "date"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"_meta": {
|
||||
|
||||
@@ -1,37 +1,59 @@
|
||||
{
|
||||
"template": {
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"host": {
|
||||
"properties":{
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
}
|
||||
},
|
||||
"related": {
|
||||
"properties":{
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
"template": {
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"host": {
|
||||
"properties": {
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
},
|
||||
"destination": {
|
||||
"properties":{
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
}
|
||||
},
|
||||
"related": {
|
||||
"properties": {
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"properties":{
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
},
|
||||
"destination": {
|
||||
"properties": {
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
}
|
||||
},
|
||||
"source": {
|
||||
"properties": {
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
}
|
||||
},
|
||||
"metadata": {
|
||||
"properties": {
|
||||
"input": {
|
||||
"properties": {
|
||||
"beats": {
|
||||
"properties": {
|
||||
"host": {
|
||||
"properties": {
|
||||
"ip": {
|
||||
"type": "ip"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"_meta": {
|
||||
"managed_by": "security_onion",
|
||||
"managed": true
|
||||
}
|
||||
|
||||
}
|
||||
@@ -0,0 +1,104 @@
|
||||
{
|
||||
"template": {
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"@timestamp": {
|
||||
"type": "date"
|
||||
},
|
||||
"so_kind": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"so_operation": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"so_chat": {
|
||||
"properties": {
|
||||
"role": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"content": {
|
||||
"type": "object",
|
||||
"enabled": false
|
||||
},
|
||||
"sessionId": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"createTime": {
|
||||
"type": "date"
|
||||
},
|
||||
"deletedAt": {
|
||||
"type": "date"
|
||||
},
|
||||
"tags": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"tool_use_id": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"userId": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"message": {
|
||||
"properties": {
|
||||
"id": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"type": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"role": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"model": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"contentStr": {
|
||||
"type": "text"
|
||||
},
|
||||
"contentBlocks": {
|
||||
"type": "nested",
|
||||
"enabled": false
|
||||
},
|
||||
"stopReason": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"stopSequence": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"usage": {
|
||||
"properties": {
|
||||
"input_tokens": {
|
||||
"type": "long"
|
||||
},
|
||||
"output_tokens": {
|
||||
"type": "long"
|
||||
},
|
||||
"credits": {
|
||||
"type": "long"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"_meta": {
|
||||
"ecs_version": "1.12.2"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"template": {},
|
||||
"version": 1,
|
||||
"_meta": {
|
||||
"description": "default settings for common Security Onion Assistant indices"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,44 @@
|
||||
{
|
||||
"template": {
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"@timestamp": {
|
||||
"type": "date"
|
||||
},
|
||||
"so_kind": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"so_session": {
|
||||
"properties": {
|
||||
"title": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"sessionId": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"createTime": {
|
||||
"type": "date"
|
||||
},
|
||||
"deleteTime": {
|
||||
"type": "date"
|
||||
},
|
||||
"tags": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
},
|
||||
"userId": {
|
||||
"ignore_above": 1024,
|
||||
"type": "keyword"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"_meta": {
|
||||
"ecs_version": "1.12.2"
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,7 @@
|
||||
{
|
||||
"template": {},
|
||||
"version": 1,
|
||||
"_meta": {
|
||||
"description": "default settings for common Security Onion Assistant indices"
|
||||
}
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
/bin/bash
|
||||
#!/bin/bash
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
@@ -6,6 +6,6 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
|
||||
echo "Starting ILM..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://localhost:9200/_ilm/start
|
||||
echo
|
||||
|
||||
@@ -8,3 +8,4 @@
|
||||
|
||||
echo "Stopping ILM..."
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://localhost:9200/_ilm/stop
|
||||
echo
|
||||
|
||||
113
salt/elasticsearch/tools/sbin/so-elasticsearch-indices-growth
Normal file
113
salt/elasticsearch/tools/sbin/so-elasticsearch-indices-growth
Normal file
@@ -0,0 +1,113 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
INFLUX_URL="https://localhost:8086/api/v2"
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
request() {
|
||||
curl -skK /opt/so/conf/influxdb/curl.config "$INFLUX_URL/$@"
|
||||
}
|
||||
|
||||
lookup_org_id() {
|
||||
response=$(request orgs?org=Security+Onion)
|
||||
echo "$response" | jq -r ".orgs[] | select(.name == \"Security Onion\").id"
|
||||
}
|
||||
|
||||
ORG_ID=$(lookup_org_id)
|
||||
|
||||
run_flux_query() {
|
||||
local query=$1
|
||||
request "query?org=$ORG_ID" -H 'Accept:application/csv' -H 'Content-type:application/vnd.flux' -d "$query" -XPOST 2>/dev/null
|
||||
}
|
||||
|
||||
read_csv_result() {
|
||||
local result="$1"
|
||||
echo "$result" | grep '^,_result,' | head -1 | awk -F',' '{print $NF}' | tr -d '\r\n\t '
|
||||
}
|
||||
|
||||
bytes_to_gb() {
|
||||
local bytes="${1:-0}"
|
||||
if [[ "$bytes" =~ ^-?[0-9]+$ ]]; then
|
||||
echo "$bytes" | awk '{printf "%.2f", $1 / 1024 / 1024 / 1024}'
|
||||
else
|
||||
echo "0.00"
|
||||
fi
|
||||
}
|
||||
|
||||
indexes_query='from(bucket: "telegraf/so_long_term")
|
||||
|> range(start: -7d)
|
||||
|> filter(fn: (r) => r._measurement == "elasticsearch_index_size")
|
||||
|> distinct(column: "_field")
|
||||
|> keep(columns: ["_field"])'
|
||||
|
||||
indexes_result=$(run_flux_query "$indexes_query")
|
||||
indexes=$(echo "$indexes_result" | tail -n +2 | cut -d',' -f4 | grep -v '^$' | grep -v '^_field$' | sed 's/\r$//' | sort -u)
|
||||
|
||||
printf "%-50s %15s %15s %15s\n" "Index Name" "Last 24hr (GB)" "Last 7d (GB)" "Last 30d (GB)"
|
||||
printf "%-50s %15s %15s %15s\n" "$(printf '%.0s-' {1..50})" "$(printf '%.0s-' {1..15})" "$(printf '%.0s-' {1..15})" "$(printf '%.0s-' {1..15})"
|
||||
|
||||
for index in $indexes; do
|
||||
[[ -z "$index" ]] && continue
|
||||
current_query="from(bucket: \"telegraf/so_long_term\")
|
||||
|> range(start: -4h)
|
||||
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|
||||
|> last()
|
||||
|> keep(columns: [\"_value\"])"
|
||||
current_result=$(run_flux_query "$current_query")
|
||||
current_size=$(read_csv_result "$current_result")
|
||||
current_size=${current_size:-0}
|
||||
|
||||
size_24h_query="from(bucket: \"telegraf/so_long_term\")
|
||||
|> range(start: -25h, stop: -23h)
|
||||
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|
||||
|> last()
|
||||
|> keep(columns: [\"_value\"])"
|
||||
size_24h_result=$(run_flux_query "$size_24h_query")
|
||||
size_24h_ago=$(read_csv_result "$size_24h_result")
|
||||
size_24h_ago=${size_24h_ago:-$current_size}
|
||||
|
||||
size_7d_query="from(bucket: \"telegraf/so_long_term\")
|
||||
|> range(start: -7d8h, stop: -7d)
|
||||
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|
||||
|> last()
|
||||
|> keep(columns: [\"_value\"])"
|
||||
size_7d_result=$(run_flux_query "$size_7d_query")
|
||||
size_7d_ago=$(read_csv_result "$size_7d_result")
|
||||
size_7d_ago=${size_7d_ago:-$current_size}
|
||||
|
||||
size_30d_query="from(bucket: \"telegraf/so_long_term\")
|
||||
|> range(start: -30d8h, stop: -30d)
|
||||
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|
||||
|> last()
|
||||
|> keep(columns: [\"_value\"])"
|
||||
size_30d_result=$(run_flux_query "$size_30d_query")
|
||||
size_30d_ago=$(read_csv_result "$size_30d_result")
|
||||
size_30d_ago=${size_30d_ago:-$current_size}
|
||||
|
||||
# if an index was recently cleaned up by ilm it will result in a negative number for 'index growth'.
|
||||
growth_24h=$(( current_size > size_24h_ago ? current_size - size_24h_ago : 0 ))
|
||||
|
||||
growth_7d=$(( current_size > size_7d_ago ? current_size - size_7d_ago : 0 ))
|
||||
|
||||
growth_30d=$(( current_size > size_30d_ago ? current_size - size_30d_ago : 0 ))
|
||||
|
||||
growth_24h_gb=$(bytes_to_gb "$growth_24h")
|
||||
growth_7d_gb=$(bytes_to_gb "$growth_7d")
|
||||
growth_30d_gb=$(bytes_to_gb "$growth_30d")
|
||||
|
||||
# Only results for indices with atleast 1 metric above 0.00
|
||||
if [[ "$growth_24h_gb" != "0.00" ]] || [[ "$growth_7d_gb" != "0.00" ]] || [[ "$growth_30d_gb" != "0.00" ]]; then
|
||||
printf "%020.2f|%-50s %15s %15s %15s\n" \
|
||||
"$growth_24h" \
|
||||
"$index" \
|
||||
"$growth_24h_gb" \
|
||||
"$growth_7d_gb" \
|
||||
"$growth_30d_gb"
|
||||
fi
|
||||
done | sort -t'|' -k1,1nr | cut -d'|' -f2-
|
||||
|
||||
@@ -21,7 +21,7 @@ while [[ "$COUNT" -le 240 ]]; do
|
||||
ELASTICSEARCH_CONNECTED="yes"
|
||||
echo "connected!"
|
||||
# Check cluster health once connected
|
||||
so-elasticsearch-query _cluster/health?wait_for_status=yellow > /dev/null 2>&1
|
||||
so-elasticsearch-query _cluster/health?wait_for_status=yellow\&timeout=120s > /dev/null 2>&1
|
||||
break
|
||||
else
|
||||
((COUNT+=1))
|
||||
|
||||
195
salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot
Normal file
195
salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot
Normal file
@@ -0,0 +1,195 @@
|
||||
#!/bin/bash
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
BOLD='\033[1;37m'
|
||||
NC='\033[0m'
|
||||
|
||||
log_title() {
|
||||
if [ $1 == "LOG" ]; then
|
||||
echo -e "\n${BOLD}================ $2 ================${NC}\n"
|
||||
elif [ $1 == "OK" ]; then
|
||||
echo -e "${GREEN} $2 ${NC}"
|
||||
elif [ $1 == "WARN" ]; then
|
||||
echo -e "${YELLOW} $2 ${NC}"
|
||||
elif [ $1 == "ERROR" ]; then
|
||||
echo -e "${RED} $2 ${NC}"
|
||||
fi
|
||||
}
|
||||
|
||||
health_report() {
|
||||
if ! health_report_output=$(so-elasticsearch-query _health_report?format=json --fail 2>/dev/null); then
|
||||
log_title "ERROR" "Failed to retrieve health report from Elasticsearch"
|
||||
return 1
|
||||
fi
|
||||
non_green_count=$(echo "$health_report_output" | jq '[.indicators | to_entries[] | select(.value.status != "green")] | length')
|
||||
|
||||
if [ "$non_green_count" -gt 0 ]; then
|
||||
echo "$health_report_output" | jq -r '.indicators | to_entries[] | select(.value.status != "green") | .key' | while read -r indicator_name; do
|
||||
indicator=$(echo "$health_report_output" | jq -r ".indicators.\"$indicator_name\"")
|
||||
status=$(echo "$indicator" | jq -r '.status')
|
||||
symptom=$(echo "$indicator" | jq -r '.symptom // "No symptom available"')
|
||||
|
||||
# reormat indicator name
|
||||
display_name=$(echo "$indicator_name" | tr '_' ' ' | sed 's/\b\(.\)/\u\1/g')
|
||||
|
||||
if [ "$status" = "yellow" ]; then
|
||||
log_title "WARN" "$display_name: $symptom"
|
||||
else
|
||||
log_title "ERROR" "$display_name: $symptom"
|
||||
fi
|
||||
|
||||
# diagnosis if available
|
||||
echo "$indicator" | jq -c '.diagnosis[]? // empty' | while read -r diagnosis; do
|
||||
cause=$(echo "$diagnosis" | jq -r '.cause // "Unknown"')
|
||||
action=$(echo "$diagnosis" | jq -r '.action // "No action specified"')
|
||||
|
||||
echo -e " ${BOLD}Cause:${NC} $cause\n"
|
||||
echo -e " ${BOLD}Action:${NC} $action\n"
|
||||
|
||||
# Check for affected indices
|
||||
affected_indices=$(echo "$diagnosis" | jq -r '.affected_resources.indices[]? // empty')
|
||||
if [ -n "$affected_indices" ]; then
|
||||
echo -e " ${BOLD}Affected indices:${NC}"
|
||||
total_indices=$(echo "$affected_indices" | wc -l)
|
||||
echo "$affected_indices" | head -10 | while read -r index; do
|
||||
echo " - $index"
|
||||
done
|
||||
if [ "$total_indices" -gt 10 ]; then
|
||||
remaining=$((total_indices - 10))
|
||||
echo " ... and $remaining more indices (truncated for readability)"
|
||||
fi
|
||||
fi
|
||||
echo
|
||||
done
|
||||
done
|
||||
else
|
||||
log_title "OK" "All health indicators are green"
|
||||
fi
|
||||
}
|
||||
|
||||
elasticsearch_status() {
|
||||
log_title "LOG" "Elasticsearch Status"
|
||||
if so-elasticsearch-query / --fail --output /dev/null; then
|
||||
health_report
|
||||
else
|
||||
log_title "ERROR" "Elasticsearch API is not accessible"
|
||||
so-status
|
||||
log_title "ERROR" "Make sure Elasticsearch is running. Addtionally, check for startup errors in /opt/so/log/elasticsearch/securityonion.log${NC}\n"
|
||||
|
||||
exit 1
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
indices_by_age() {
|
||||
log_title "LOG" "Indices by Creation Date - Size > 1KB"
|
||||
log_title "WARN" "Since high/flood watermark has been reached consider updating ILM policies.\n"
|
||||
if ! indices_output=$(so-elasticsearch-query '_cat/indices?v&s=creation.date:asc&h=creation.date.string,index,status,health,docs.count,pri.store.size&bytes=b&format=json' --fail 2>/dev/null); then
|
||||
log_title "ERROR" "Failed to retrieve indices list from Elasticsearch"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Filter for indices with size > 1KB (1024 bytes) and format output
|
||||
echo -e "${BOLD}Creation Date Name Size${NC}"
|
||||
echo -e "${BOLD}--------------------------------------------------------------------------------------------------------------${NC}"
|
||||
|
||||
# Create list of indices excluding .internal, so-detection*, so-case*
|
||||
echo "$indices_output" | jq -r '.[] | select((."pri.store.size" | tonumber) > 1024) | select(.index | (startswith(".internal") or startswith("so-detection") or startswith("so-case")) | not ) | "\(."creation.date.string") | \(.index) | \(."pri.store.size")"' | while IFS='|' read -r creation_date index_name size_bytes; do
|
||||
# Convert bytes to GB / MB
|
||||
if [ "$size_bytes" -gt 1073741824 ]; then
|
||||
size_human=$(echo "scale=2; $size_bytes / 1073741824" | bc)GB
|
||||
else
|
||||
size_human=$(echo "scale=2; $size_bytes / 1048576" | bc)MB
|
||||
fi
|
||||
|
||||
creation_date=$(date -d "$creation_date" '+%Y-%m-%dT%H:%MZ' )
|
||||
|
||||
# Format output with spacing
|
||||
printf "%-19s %-76s %10s\n" "$creation_date" "$index_name" "$size_human"
|
||||
done
|
||||
}
|
||||
|
||||
watermark_settings() {
|
||||
watermark_path=".defaults.cluster.routing.allocation.disk.watermark"
|
||||
if ! watermark_output=$(so-elasticsearch-query _cluster/settings?include_defaults=true\&filter_path=*.cluster.routing.allocation.disk.* --fail 2>/dev/null); then
|
||||
log_title "ERROR" "Failed to retrieve watermark settings from Elasticsearch"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! disk_allocation_output=$(so-elasticsearch-query _cat/nodes?v\&h=name,ip,disk.used_percent,disk.avail,disk.total,node.role\&format=json --fail 2>/dev/null); then
|
||||
log_title "ERROR" "Failed to retrieve disk allocation data from Elasticsearch"
|
||||
return 1
|
||||
fi
|
||||
|
||||
flood=$(echo $watermark_output | jq -r "$watermark_path.flood_stage" )
|
||||
high=$(echo $watermark_output | jq -r "$watermark_path.high" )
|
||||
low=$(echo $watermark_output | jq -r "$watermark_path.low" )
|
||||
|
||||
# Strip percentage signs for comparison
|
||||
flood_num=${flood%\%}
|
||||
high_num=${high%\%}
|
||||
low_num=${low%\%}
|
||||
|
||||
# Check each nodes disk usage
|
||||
log_title "LOG" "Disk Usage Check"
|
||||
echo -e "${BOLD}LOW:${GREEN}$low${NC}${BOLD} HIGH:${YELLOW}${high}${NC}${BOLD} FLOOD:${RED}${flood}${NC}\n"
|
||||
|
||||
# Only show data nodes (d=data, h=hot, w=warm, c=cold, f=frozen, s=content)
|
||||
echo "$disk_allocation_output" | jq -r '.[] | select(.["node.role"] | test("[dhwcfs]")) | "\(.name)|\(.["disk.used_percent"])"' | while IFS='|' read -r node_name disk_used; do
|
||||
disk_used_num=$(echo $disk_used | bc)
|
||||
|
||||
if (( $(echo "$disk_used_num >= $flood_num" | bc -l) )); then
|
||||
log_title "ERROR" "$node_name is at or above the flood watermark ($flood)! Disk usage: ${disk_used}%"
|
||||
touch /tmp/watermark_reached
|
||||
elif (( $(echo "$disk_used_num >= $high_num" | bc -l) )); then
|
||||
log_title "ERROR" "$node_name is at or above the high watermark ($high)! Disk usage: ${disk_used}%"
|
||||
touch /tmp/watermark_reached
|
||||
else
|
||||
log_title "OK" "$node_name disk usage: ${disk_used}%"
|
||||
fi
|
||||
done
|
||||
|
||||
# Check if we need to show indices by age
|
||||
if [ -f /tmp/watermark_reached ]; then
|
||||
indices_by_age
|
||||
rm -f /tmp/watermark_reached
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
unassigned_shards() {
|
||||
|
||||
if ! unassigned_shards_output=$(so-elasticsearch-query _cat/shards?v\&h=index,shard,prirep,state,unassigned.reason,unassigned.details\&s=state\&format=json --fail 2>/dev/null); then
|
||||
log_title "ERROR" "Failed to retrieve shard data from Elasticsearch"
|
||||
return 1
|
||||
fi
|
||||
|
||||
log_title "LOG" "Unassigned Shards Check"
|
||||
# Check if there are any UNASSIGNED shards
|
||||
unassigned_count=$(echo "$unassigned_shards_output" | jq '[.[] | select(.state == "UNASSIGNED")] | length')
|
||||
|
||||
if [ "$unassigned_count" -gt 0 ]; then
|
||||
echo "$unassigned_shards_output" | jq -r '.[] | select(.state == "UNASSIGNED") | "\(.index)|\(.shard)|\(.prirep)|\(."unassigned.reason")"' | while IFS='|' read -r index shard prirep reason; do
|
||||
if [ "$prirep" = "r" ]; then
|
||||
log_title "WARN" "Replica shard for index $index is unassigned. Reason: $reason"
|
||||
elif [ "$prirep" = "p" ]; then
|
||||
log_title "ERROR" "Primary shard for index $index is unassigned. Reason: $reason"
|
||||
fi
|
||||
done
|
||||
else
|
||||
log_title "OK" "All shards are assigned"
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
elasticsearch_status
|
||||
watermark_settings
|
||||
unassigned_shards
|
||||
}
|
||||
|
||||
main
|
||||
@@ -136,7 +136,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
|
||||
TEMPLATE=${i::-14}
|
||||
COMPONENT_PATTERN=${TEMPLATE:3}
|
||||
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
|
||||
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ logs-http_endpoint\.generic|logs-winlog\.winlog ]]; then
|
||||
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then
|
||||
load_failures=$((load_failures+1))
|
||||
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
|
||||
else
|
||||
|
||||
@@ -21,7 +21,7 @@
|
||||
'so-strelka-filestream'
|
||||
] %}
|
||||
|
||||
{% elif GLOBALS.role == 'so-manager' or GLOBALS.role == 'so-standalone' or GLOBALS.role == 'so-managersearch' %}
|
||||
{% elif GLOBALS.role in ['so-manager', 'so-standalone','so-managersearch', 'so-managerhype'] %}
|
||||
{% set NODE_CONTAINERS = [
|
||||
'so-dockerregistry',
|
||||
'so-elasticsearch',
|
||||
|
||||
@@ -11,13 +11,16 @@ firewall:
|
||||
endgame: []
|
||||
eval: []
|
||||
external_suricata: []
|
||||
external_kafka: []
|
||||
fleet: []
|
||||
heavynode: []
|
||||
hypervisor: []
|
||||
idh: []
|
||||
import: []
|
||||
localhost:
|
||||
- 127.0.0.1
|
||||
manager: []
|
||||
managerhype: []
|
||||
managersearch: []
|
||||
receiver: []
|
||||
searchnode: []
|
||||
@@ -103,6 +106,10 @@ firewall:
|
||||
tcp:
|
||||
- 9092
|
||||
udp: []
|
||||
kafka_external_access:
|
||||
tcp:
|
||||
- 29092
|
||||
udp: []
|
||||
kibana:
|
||||
tcp:
|
||||
- 5601
|
||||
@@ -473,6 +480,8 @@ firewall:
|
||||
external_suricata:
|
||||
portgroups:
|
||||
- external_suricata
|
||||
external_kafka:
|
||||
portgroups: []
|
||||
desktop:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
@@ -482,6 +491,15 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -534,6 +552,218 @@ firewall:
|
||||
desktop:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
portgroups: []
|
||||
customhostgroup2:
|
||||
portgroups: []
|
||||
customhostgroup3:
|
||||
portgroups: []
|
||||
customhostgroup4:
|
||||
portgroups: []
|
||||
customhostgroup5:
|
||||
portgroups: []
|
||||
customhostgroup6:
|
||||
portgroups: []
|
||||
customhostgroup7:
|
||||
portgroups: []
|
||||
customhostgroup8:
|
||||
portgroups: []
|
||||
customhostgroup9:
|
||||
portgroups: []
|
||||
managerhype:
|
||||
chain:
|
||||
DOCKER-USER:
|
||||
hostgroups:
|
||||
managerhype:
|
||||
portgroups:
|
||||
- kibana
|
||||
- redis
|
||||
- influxdb
|
||||
- elasticsearch_rest
|
||||
- elasticsearch_node
|
||||
- docker_registry
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- localrules
|
||||
- sensoroni
|
||||
fleet:
|
||||
portgroups:
|
||||
- elasticsearch_rest
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- beats_5056
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
idh:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
sensor:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
searchnode:
|
||||
portgroups:
|
||||
- redis
|
||||
- elasticsearch_rest
|
||||
- elasticsearch_node
|
||||
- beats_5644
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
heavynode:
|
||||
portgroups:
|
||||
- redis
|
||||
- elasticsearch_rest
|
||||
- elasticsearch_node
|
||||
- beats_5644
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
receiver:
|
||||
portgroups:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
analyst:
|
||||
portgroups:
|
||||
- nginx
|
||||
beats_endpoint:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
beats_endpoint_ssl:
|
||||
portgroups:
|
||||
- beats_5644
|
||||
elasticsearch_rest:
|
||||
portgroups:
|
||||
- elasticsearch_rest
|
||||
elastic_agent_endpoint:
|
||||
portgroups:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
endgame:
|
||||
portgroups:
|
||||
- endgame
|
||||
external_suricata:
|
||||
portgroups:
|
||||
- external_suricata
|
||||
desktop:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
portgroups: []
|
||||
customhostgroup2:
|
||||
portgroups: []
|
||||
customhostgroup3:
|
||||
portgroups: []
|
||||
customhostgroup4:
|
||||
portgroups: []
|
||||
customhostgroup5:
|
||||
portgroups: []
|
||||
customhostgroup6:
|
||||
portgroups: []
|
||||
customhostgroup7:
|
||||
portgroups: []
|
||||
customhostgroup8:
|
||||
portgroups: []
|
||||
customhostgroup9:
|
||||
portgroups: []
|
||||
INPUT:
|
||||
hostgroups:
|
||||
anywhere:
|
||||
portgroups:
|
||||
- ssh
|
||||
dockernet:
|
||||
portgroups:
|
||||
- all
|
||||
fleet:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
idh:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
sensor:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
searchnode:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
heavynode:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
receiver:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
desktop:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
@@ -668,6 +898,8 @@ firewall:
|
||||
external_suricata:
|
||||
portgroups:
|
||||
- external_suricata
|
||||
external_kafka:
|
||||
portgroups: []
|
||||
desktop:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
@@ -677,6 +909,15 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -729,6 +970,9 @@ firewall:
|
||||
desktop:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
@@ -867,6 +1111,8 @@ firewall:
|
||||
external_suricata:
|
||||
portgroups:
|
||||
- external_suricata
|
||||
external_kafka:
|
||||
portgroups: []
|
||||
strelka_frontend:
|
||||
portgroups:
|
||||
- strelka_frontend
|
||||
@@ -879,6 +1125,15 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -934,6 +1189,9 @@ firewall:
|
||||
desktop:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
@@ -972,6 +1230,10 @@ firewall:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
- elasticsearch_rest
|
||||
managerhype:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
- elasticsearch_rest
|
||||
standalone:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
@@ -1119,6 +1381,10 @@ firewall:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
- elasticsearch_rest
|
||||
managerhype:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
- elasticsearch_rest
|
||||
standalone:
|
||||
portgroups:
|
||||
- elasticsearch_node
|
||||
@@ -1321,6 +1587,9 @@ firewall:
|
||||
portgroups:
|
||||
- redis
|
||||
- elastic_agent_data
|
||||
managerhype:
|
||||
portgroups:
|
||||
- elastic_agent_data
|
||||
self:
|
||||
portgroups:
|
||||
- redis
|
||||
@@ -1337,6 +1606,8 @@ firewall:
|
||||
endgame:
|
||||
portgroups:
|
||||
- endgame
|
||||
external_kafka:
|
||||
portgroups: []
|
||||
receiver:
|
||||
portgroups: []
|
||||
customhostgroup0:
|
||||
@@ -1436,6 +1707,9 @@ firewall:
|
||||
managersearch:
|
||||
portgroups:
|
||||
- openssh
|
||||
managerhype:
|
||||
portgroups:
|
||||
- openssh
|
||||
standalone:
|
||||
portgroups:
|
||||
- openssh
|
||||
@@ -1459,3 +1733,66 @@ firewall:
|
||||
portgroups: []
|
||||
customhostgroup9:
|
||||
portgroups: []
|
||||
hypervisor:
|
||||
chain:
|
||||
DOCKER-USER:
|
||||
hostgroups:
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
portgroups: []
|
||||
customhostgroup2:
|
||||
portgroups: []
|
||||
customhostgroup3:
|
||||
portgroups: []
|
||||
customhostgroup4:
|
||||
portgroups: []
|
||||
customhostgroup5:
|
||||
portgroups: []
|
||||
customhostgroup6:
|
||||
portgroups: []
|
||||
customhostgroup7:
|
||||
portgroups: []
|
||||
customhostgroup8:
|
||||
portgroups: []
|
||||
customhostgroup9:
|
||||
portgroups: []
|
||||
INPUT:
|
||||
hostgroups:
|
||||
anywhere:
|
||||
portgroups:
|
||||
- ssh
|
||||
dockernet:
|
||||
portgroups:
|
||||
- all
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
manager:
|
||||
portgroups: []
|
||||
managersearch:
|
||||
portgroups: []
|
||||
managerhype:
|
||||
portgroups: []
|
||||
standalone:
|
||||
portgroups: []
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
portgroups: []
|
||||
customhostgroup2:
|
||||
portgroups: []
|
||||
customhostgroup3:
|
||||
portgroups: []
|
||||
customhostgroup4:
|
||||
portgroups: []
|
||||
customhostgroup5:
|
||||
portgroups: []
|
||||
customhostgroup6:
|
||||
portgroups: []
|
||||
customhostgroup7:
|
||||
portgroups: []
|
||||
customhostgroup8:
|
||||
portgroups: []
|
||||
customhostgroup9:
|
||||
portgroups: []
|
||||
|
||||
@@ -91,6 +91,10 @@ COMMIT
|
||||
-A INPUT -m conntrack --ctstate INVALID -j DROP
|
||||
-A INPUT -p icmp -j ACCEPT
|
||||
-A INPUT -j LOGGING
|
||||
{% if GLOBALS.role in ['so-hypervisor', 'so-managerhype'] -%}
|
||||
-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||
-A FORWARD -i br0 -o br0 -j ACCEPT
|
||||
{%- endif %}
|
||||
-A FORWARD -j DOCKER-USER
|
||||
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
|
||||
-A FORWARD -o sobridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||
|
||||
@@ -21,25 +21,38 @@
|
||||
{# Only add Kafka firewall items when Kafka enabled #}
|
||||
{% set role = GLOBALS.role.split('-')[1] %}
|
||||
|
||||
{% if GLOBALS.pipeline == 'KAFKA' and role in ['manager', 'managersearch', 'standalone'] %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
|
||||
{% endif %}
|
||||
{% if GLOBALS.pipeline == 'KAFKA' %}
|
||||
{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
|
||||
{% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}
|
||||
|
||||
{% if GLOBALS.pipeline == 'KAFKA' and role == 'receiver' %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.self.portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.standalone.portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.manager.portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.managersearch.portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
|
||||
{% endif %}
|
||||
{% if role.startswith('manager') or role == 'standalone' %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.pipeline == 'KAFKA' and role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
|
||||
{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
|
||||
{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
|
||||
{% if role == 'receiver' %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.self.portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.standalone.portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.manager.portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.managersearch.portgroups.append('kafka_controller') %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
|
||||
{% endif %}
|
||||
|
||||
{% if role.startswith('manager') or role in ['standalone', 'receiver'] %}
|
||||
{% for r in ['manager', 'managersearch', 'managerhype', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
|
||||
{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{% if KAFKA_EXTERNAL_ACCESS %}
|
||||
{# Kafka external access only applies for Kafka nodes with the broker role. #}
|
||||
{% if role.startswith('manager') or role in ['standalone', 'receiver'] and 'broker' in kafka_node_type %}
|
||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
|
||||
|
||||
@@ -33,8 +33,10 @@ firewall:
|
||||
endgame: *hostgroupsettingsadv
|
||||
eval: *hostgroupsettings
|
||||
external_suricata: *hostgroupsettings
|
||||
external_kafka: *hostgroupsettings
|
||||
fleet: *hostgroupsettings
|
||||
heavynode: *hostgroupsettings
|
||||
hypervisor: *hostgroupsettings
|
||||
idh: *hostgroupsettings
|
||||
import: *hostgroupsettings
|
||||
localhost: *ROhostgroupsettingsadv
|
||||
@@ -130,6 +132,9 @@ firewall:
|
||||
kafka_data:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
kafka_external_access:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
kibana:
|
||||
tcp: *tcpsettings
|
||||
udp: *udpsettings
|
||||
|
||||
@@ -43,7 +43,7 @@ global:
|
||||
global: True
|
||||
advanced: True
|
||||
pipeline:
|
||||
description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license.
|
||||
description: Sets which pipeline technology for events to use. The use of Kafka requires a Security Onion Pro license.
|
||||
regex: ^(REDIS|KAFKA)$
|
||||
options:
|
||||
- REDIS
|
||||
|
||||
125
salt/hypervisor/defaults.yaml
Normal file
125
salt/hypervisor/defaults.yaml
Normal file
@@ -0,0 +1,125 @@
|
||||
hypervisor:
|
||||
model:
|
||||
testModel:
|
||||
hardware:
|
||||
cpu: 128
|
||||
memory: 128
|
||||
disk:
|
||||
1: pci_0000_c7_00_0
|
||||
2: pci_0000_c8_00_0
|
||||
copper:
|
||||
1: pci_0000_c4_00_0
|
||||
2: pci_0000_c4_00_1
|
||||
3: pci_0000_c4_00_2
|
||||
4: pci_0000_c4_00_3
|
||||
sfp:
|
||||
5: pci_0000_02_00_0
|
||||
6: pci_0000_02_00_1
|
||||
7: pci_0000_41_00_0
|
||||
8: pci_0000_41_00_1
|
||||
SOSSNNV:
|
||||
hardware:
|
||||
cpu: 128
|
||||
memory: 256
|
||||
disk:
|
||||
1: pci_0000_42_00_0
|
||||
2: pci_0000_43_00_0
|
||||
3: pci_0000_44_00_0
|
||||
4: pci_0000_45_00_0
|
||||
copper:
|
||||
sfp:
|
||||
1: pci_0000_02_00_0
|
||||
2: pci_0000_02_00_1
|
||||
3: pci_0000_41_00_0
|
||||
4: pci_0000_41_00_1
|
||||
SOSSNNV-DE02:
|
||||
hardware:
|
||||
cpu: 128
|
||||
memory: 384
|
||||
disk:
|
||||
1: pci_0000_41_00_0
|
||||
2: pci_0000_42_00_0
|
||||
3: pci_0000_81_00_0
|
||||
4: pci_0000_82_00_0
|
||||
5: pci_0000_83_00_0
|
||||
6: pci_0000_84_00_0
|
||||
copper:
|
||||
1: pci_0000_85_00_0
|
||||
2: pci_0000_85_00_1
|
||||
3: pci_0000_85_00_2
|
||||
4: pci_0000_85_00_3
|
||||
sfp:
|
||||
5: pci_0000_c4_00_0
|
||||
6: pci_0000_c4_00_1
|
||||
7: pci_0000_c5_00_0
|
||||
8: pci_0000_c5_00_1
|
||||
9: pci_0000_c5_00_2
|
||||
10: pci_0000_c5_00_3
|
||||
SOSSN7200:
|
||||
hardware:
|
||||
cpu: 128
|
||||
memory: 256
|
||||
copper:
|
||||
1: pci_0000_03_00_0
|
||||
2: pci_0000_03_00_1
|
||||
3: pci_0000_03_00_2
|
||||
4: pci_0000_03_00_3
|
||||
sfp:
|
||||
5: pci_0000_02_00_0
|
||||
6: pci_0000_02_00_1
|
||||
7: pci_0000_81_00_0
|
||||
8: pci_0000_81_00_1
|
||||
9: pci_0000_81_00_2
|
||||
10: pci_0000_81_00_3
|
||||
SOSSN7200-DE02:
|
||||
hardware:
|
||||
cpu: 128
|
||||
memory: 384
|
||||
copper:
|
||||
1: pci_0000_82_00_0
|
||||
2: pci_0000_82_00_1
|
||||
3: pci_0000_82_00_2
|
||||
4: pci_0000_82_00_3
|
||||
sfp:
|
||||
5: pci_0000_c4_00_0
|
||||
6: pci_0000_c4_00_1
|
||||
7: pci_0000_c5_00_0
|
||||
8: pci_0000_c5_00_1
|
||||
9: pci_0000_c6_00_0
|
||||
10: pci_0000_c6_00_1
|
||||
11: pci_0000_c6_00_2
|
||||
12: pci_0000_c6_00_3
|
||||
SOS4000:
|
||||
hardware:
|
||||
cpu: 128
|
||||
memory: 256
|
||||
copper:
|
||||
1: pci_0000_03_00_0
|
||||
2: pci_0000_03_00_1
|
||||
3: pci_0000_03_00_2
|
||||
4: pci_0000_03_00_3
|
||||
sfp:
|
||||
5: pci_0000_02_00_0
|
||||
6: pci_0000_02_00_1
|
||||
7: pci_0000_81_00_0
|
||||
8: pci_0000_81_00_1
|
||||
9: pci_0000_81_00_2
|
||||
10: pci_0000_81_00_3
|
||||
SOS5000-DE02:
|
||||
hardware:
|
||||
cpu: 128
|
||||
memory: 384
|
||||
copper:
|
||||
1: pci_0000_82_00_0
|
||||
2: pci_0000_82_00_1
|
||||
3: pci_0000_82_00_2
|
||||
4: pci_0000_82_00_3
|
||||
sfp:
|
||||
5: pci_0000_c4_00_0
|
||||
6: pci_0000_c4_00_1
|
||||
7: pci_0000_c5_00_0
|
||||
8: pci_0000_c5_00_1
|
||||
9: pci_0000_c6_00_0
|
||||
10: pci_0000_c6_00_1
|
||||
11: pci_0000_c6_00_2
|
||||
12: pci_0000_c6_00_3
|
||||
1
salt/hypervisor/hosts/README
Normal file
1
salt/hypervisor/hosts/README
Normal file
@@ -0,0 +1 @@
|
||||
This directory will contain hypervisor hosts. We need this README in place to ensure /opt/so/saltstack/local/salt/hypervisor/hosts directory gets created during setup.
|
||||
49
salt/hypervisor/init.sls
Normal file
49
salt/hypervisor/init.sls
Normal file
@@ -0,0 +1,49 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
#
|
||||
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||
#
|
||||
# "You may not move, change, disable, or circumvent the license key functionality
|
||||
# in the software, and you may not remove or obscure any functionality in the
|
||||
# software that is protected by the license key."
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls in allowed_states %}
|
||||
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||
|
||||
hypervisor_log_dir:
|
||||
file.directory:
|
||||
- name: /opt/so/log/hypervisor
|
||||
|
||||
hypervisor_sbin:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://hypervisor/tools/sbin
|
||||
- file_mode: 744
|
||||
|
||||
hypervisor_sbin_jinja:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- source: salt://hypervisor/tools/sbin_jinja
|
||||
- template: jinja
|
||||
- file_mode: 744
|
||||
|
||||
{% else %}
|
||||
{{sls}}_no_license_detected:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_no_license_detected
|
||||
- comment:
|
||||
- "Hypervisor nodes are a feature supported only for customers with a valid license.
|
||||
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
|
||||
for more information about purchasing a license to enable this feature."
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
{{sls}}_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: {{sls}}_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
164
salt/hypervisor/map.jinja
Normal file
164
salt/hypervisor/map.jinja
Normal file
@@ -0,0 +1,164 @@
|
||||
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
Elastic License 2.0.
|
||||
|
||||
Note: Per the Elastic License 2.0, the second limitation states:
|
||||
|
||||
"You may not move, change, disable, or circumvent the license key functionality
|
||||
in the software, and you may not remove or obscure any functionality in the
|
||||
software that is protected by the license key." #}
|
||||
|
||||
{% if 'vrt' in salt['pillar.get']('features', []) %}
|
||||
|
||||
{# Import defaults.yaml for model hardware capabilities #}
|
||||
{% import_yaml 'hypervisor/defaults.yaml' as DEFAULTS %}
|
||||
|
||||
{# Get hypervisor nodes from pillar #}
|
||||
{% set NODES = salt['pillar.get']('hypervisor:nodes', {}) %}
|
||||
|
||||
{# Build enhanced HYPERVISORS structure #}
|
||||
{% set HYPERVISORS = {} %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: NODES content: ' ~ NODES | tojson) %}
|
||||
{% for role, hypervisors in NODES.items() %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing role: ' ~ role) %}
|
||||
{% do HYPERVISORS.update({role: {}}) %}
|
||||
{% for hypervisor, config in hypervisors.items() %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing hypervisor: ' ~ hypervisor ~ ' with config: ' ~ config | tojson) %}
|
||||
{# Get model from cached grains using Salt runner #}
|
||||
{% set grains = salt.saltutil.runner('cache.grains', tgt=hypervisor ~ '_*', tgt_type='glob') %}
|
||||
{% set model = '' %}
|
||||
{% if grains %}
|
||||
{% set minion_id = grains.keys() | first %}
|
||||
{% set model = grains[minion_id].get('sosmodel', '') %}
|
||||
{% endif %}
|
||||
{% set model_config = DEFAULTS.hypervisor.model.get(model, {}) %}
|
||||
|
||||
{# Get VM list from VMs file #}
|
||||
{% set vms = {} %}
|
||||
{% set vm_list = [] %}
|
||||
{% set vm_list_file = 'hypervisor/hosts/' ~ hypervisor ~ 'VMs' %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM list file: ' ~ vm_list_file) %}
|
||||
{% if salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_list_file) %}
|
||||
{% import_json vm_list_file as vm_list %}
|
||||
{% endif %}
|
||||
{% if vm_list %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM list content: ' ~ vm_list | tojson) %}
|
||||
{% else %}
|
||||
{# we won't get here if the vm_list_file doesn't exist because we will get TemplateNotFound on the import_json #}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM list empty: ' ~ vm_list_file) %}
|
||||
{% endif %}
|
||||
|
||||
{# Load status and configuration for each VM #}
|
||||
{% for vm in vm_list %}
|
||||
{# Get VM details from list entry #}
|
||||
{% set hostname = vm.get('hostname', '') %}
|
||||
{% set role = vm.get('role', '') %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing VM - hostname: ' ~ hostname ~ ', role: ' ~ role) %}
|
||||
|
||||
{# Load VM configuration from config file #}
|
||||
{% set vm_file = 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ hostname ~ '_' ~ role %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM config file: ' ~ vm_file) %}
|
||||
{% import_json vm_file as vm_state %}
|
||||
{% if vm_state %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM config content: ' ~ vm_state | tojson) %}
|
||||
{% set vm_data = {'config': vm_state.config} %}
|
||||
|
||||
{# Load VM status from status file #}
|
||||
{% set status_file = vm_file ~ '.status' %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM status file: ' ~ status_file) %}
|
||||
{% import_json status_file as status_data %}
|
||||
{% if status_data %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: VM status content: ' ~ status_data | tojson) %}
|
||||
{% do vm_data.update({'status': status_data}) %}
|
||||
{% else %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Status file empty: ' ~ status_file) %}
|
||||
{% do vm_data.update({
|
||||
'status': {
|
||||
'status': '',
|
||||
'details': null,
|
||||
'timestamp': ''
|
||||
}
|
||||
}) %}
|
||||
{% endif %}
|
||||
{% do vms.update({hostname ~ '_' ~ role: vm_data}) %}
|
||||
{% else %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Config file empty: ' ~ vm_file) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{# Find and add destroyed VMs from status files #}
|
||||
{% set processed_vms = [] %}
|
||||
{% for vm_full_name, vm_data in vms.items() %}
|
||||
{% do processed_vms.append(vm_full_name) %}
|
||||
{% endfor %}
|
||||
|
||||
{# Find all status files for this hypervisor #}
|
||||
{% set relative_path = 'hypervisor/hosts/' ~ hypervisor %}
|
||||
{% set absolute_path = '/opt/so/saltstack/local/salt/' ~ relative_path %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Scanning for status files in: ' ~ absolute_path) %}
|
||||
|
||||
{# Try to find status files using file.find with absolute path #}
|
||||
{% set status_files = salt['file.find'](absolute_path, name='*_*.status', type='f') %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Found status files: ' ~ status_files | tojson) %}
|
||||
|
||||
{# Convert absolute paths back to relative paths for processing #}
|
||||
{% set relative_status_files = [] %}
|
||||
{% for status_file in status_files %}
|
||||
{% set relative_file = status_file | replace('/opt/so/saltstack/local/salt/', '') %}
|
||||
{% do relative_status_files.append(relative_file) %}
|
||||
{% endfor %}
|
||||
{% set status_files = relative_status_files %}
|
||||
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Converted to relative paths: ' ~ status_files | tojson) %}
|
||||
|
||||
{% for status_file in status_files %}
|
||||
{# Extract the VM name from the filename #}
|
||||
{% set basename = status_file.split('/')[-1] %}
|
||||
{% set vm_name = basename.replace('.status', '') %}
|
||||
{% set hostname = vm_name.split('_')[0] %}
|
||||
|
||||
{# Skip already processed VMs #}
|
||||
{% if vm_name in processed_vms %}
|
||||
{% continue %}
|
||||
{% endif %}
|
||||
|
||||
{# Read the status file #}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Processing potential destroyed VM status file: ' ~ status_file) %}
|
||||
{% import_json status_file as status_data %}
|
||||
|
||||
{# Only process files with "Destroyed Instance" status #}
|
||||
{% if status_data and status_data.status == 'Destroyed Instance' %}
|
||||
{% do salt.log.debug('salt/hypervisor/map.jinja: Found VM with Destroyed Instance status: ' ~ vm_name) %}
|
||||
|
||||
{# Add to vms with minimal config #}
|
||||
{% do vms.update({
|
||||
vm_name: {
|
||||
'status': status_data,
|
||||
'config': {}
|
||||
}
|
||||
}) %}
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
{# Merge node config with model capabilities and VM states #}
|
||||
{% do HYPERVISORS[role].update({
|
||||
hypervisor: {
|
||||
'config': config,
|
||||
'model': model,
|
||||
'hardware': model_config.get('hardware', {}),
|
||||
'vms': vms
|
||||
}
|
||||
}) %}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
{% else %}
|
||||
|
||||
{% do salt.log.error(
|
||||
'Hypervisor nodes are a feature supported only for customers with a valid license.'
|
||||
'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com'
|
||||
'for more information about purchasing a license to enable this feature.'
|
||||
) %}
|
||||
|
||||
{% endif %}
|
||||
335
salt/hypervisor/tools/sbin/so-nvme-raid1.sh
Normal file
335
salt/hypervisor/tools/sbin/so-nvme-raid1.sh
Normal file
@@ -0,0 +1,335 @@
|
||||
#!/bin/bash
|
||||
|
||||
#################################################################
|
||||
# RAID-1 Setup Script for NVMe Drives
|
||||
#################################################################
|
||||
#
|
||||
# DESCRIPTION:
|
||||
# This script automatically sets up a RAID-1 (mirrored) array using two NVMe drives
|
||||
# (/dev/nvme0n1 and /dev/nvme1n1) and mounts it at /nsm with XFS filesystem.
|
||||
#
|
||||
# FUNCTIONALITY:
|
||||
# - Detects and reports existing RAID configurations
|
||||
# - Thoroughly cleans target drives of any existing data/configurations
|
||||
# - Creates GPT partition tables with RAID-type partitions
|
||||
# - Establishes RAID-1 array (${RAID_DEVICE}) for data redundancy
|
||||
# - Formats the array with XFS filesystem for performance
|
||||
# - Automatically mounts at /nsm and configures for boot persistence
|
||||
# - Provides monitoring information for resync operations
|
||||
#
|
||||
# SAFETY FEATURES:
|
||||
# - Requires root privileges
|
||||
# - Exits gracefully if RAID already exists and is mounted
|
||||
# - Performs comprehensive cleanup to avoid conflicts
|
||||
# - Forces partition table updates and waits for system recognition
|
||||
#
|
||||
# PREREQUISITES:
|
||||
# - Two NVMe drives: /dev/nvme0n1 and /dev/nvme1n1
|
||||
# - Root access
|
||||
# - mdadm, sgdisk, and standard Linux utilities
|
||||
#
|
||||
# WARNING: This script will DESTROY all data on the target drives!
|
||||
#
|
||||
# USAGE: sudo ./so-nvme-raid1.sh
|
||||
#
|
||||
#################################################################
|
||||
|
||||
# Exit on any error
|
||||
set -e
|
||||
|
||||
# Configuration variables
|
||||
RAID_ARRAY_NAME="md0"
|
||||
RAID_DEVICE="/dev/${RAID_ARRAY_NAME}"
|
||||
MOUNT_POINT="/nsm"
|
||||
|
||||
# Function to log messages
|
||||
log() {
|
||||
echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
|
||||
}
|
||||
|
||||
# Function to check if running as root
|
||||
check_root() {
|
||||
if [ "$EUID" -ne 0 ]; then
|
||||
log "Error: Please run as root"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to find MD arrays using specific devices
|
||||
find_md_arrays_using_devices() {
|
||||
local target_devices=("$@")
|
||||
local found_arrays=()
|
||||
|
||||
# Parse /proc/mdstat to find arrays using our target devices
|
||||
if [ -f "/proc/mdstat" ]; then
|
||||
while IFS= read -r line; do
|
||||
if [[ $line =~ ^(md[0-9]+) ]]; then
|
||||
local array_name="${BASH_REMATCH[1]}"
|
||||
local array_path="/dev/$array_name"
|
||||
|
||||
# Check if this array uses any of our target devices
|
||||
for device in "${target_devices[@]}"; do
|
||||
if echo "$line" | grep -q "${device##*/}"; then
|
||||
found_arrays+=("$array_path")
|
||||
break
|
||||
fi
|
||||
done
|
||||
fi
|
||||
done < /proc/mdstat
|
||||
fi
|
||||
|
||||
printf '%s\n' "${found_arrays[@]}"
|
||||
}
|
||||
|
||||
# Function to check if RAID is already set up
|
||||
check_existing_raid() {
|
||||
local target_devices=("/dev/nvme0n1p1" "/dev/nvme1n1p1")
|
||||
local found_arrays=($(find_md_arrays_using_devices "${target_devices[@]}"))
|
||||
|
||||
# Check if we found any arrays using our target devices
|
||||
if [ ${#found_arrays[@]} -gt 0 ]; then
|
||||
for array_path in "${found_arrays[@]}"; do
|
||||
if mdadm --detail "$array_path" &>/dev/null; then
|
||||
local raid_state=$(mdadm --detail "$array_path" | grep "State" | awk '{print $3}')
|
||||
local mount_point="/nsm"
|
||||
|
||||
log "Found existing RAID array $array_path (State: $raid_state)"
|
||||
|
||||
# Check what's currently mounted at /nsm
|
||||
local current_mount=$(findmnt -n -o SOURCE "$mount_point" 2>/dev/null || echo "")
|
||||
|
||||
if [ -n "$current_mount" ]; then
|
||||
if [ "$current_mount" = "$array_path" ]; then
|
||||
log "RAID array $array_path is already correctly mounted at $mount_point"
|
||||
log "Current RAID details:"
|
||||
mdadm --detail "$array_path"
|
||||
|
||||
# Check if resyncing
|
||||
if grep -q "resync" /proc/mdstat; then
|
||||
log "RAID is currently resyncing:"
|
||||
grep resync /proc/mdstat
|
||||
log "You can monitor progress with: watch -n 60 cat /proc/mdstat"
|
||||
else
|
||||
log "RAID is fully synced and operational"
|
||||
fi
|
||||
|
||||
# Show disk usage
|
||||
log "Current disk usage:"
|
||||
df -h "$mount_point"
|
||||
|
||||
exit 0
|
||||
else
|
||||
log "Found $mount_point mounted on $current_mount, but RAID array $array_path exists"
|
||||
log "Will unmount current filesystem and remount on RAID array"
|
||||
|
||||
# Unmount current filesystem
|
||||
log "Unmounting $mount_point"
|
||||
umount "$mount_point"
|
||||
|
||||
# Remove old fstab entry
|
||||
log "Removing old fstab entry for $current_mount"
|
||||
sed -i "\|$current_mount|d" /etc/fstab
|
||||
|
||||
# Mount the RAID array
|
||||
log "Mounting RAID array $array_path at $mount_point"
|
||||
mount "$array_path" "$mount_point"
|
||||
|
||||
# Update fstab
|
||||
log "Updating fstab for RAID array"
|
||||
sed -i "\|${array_path}|d" /etc/fstab
|
||||
echo "${array_path} ${mount_point} xfs defaults,nofail 0 0" >> /etc/fstab
|
||||
|
||||
log "RAID array is now mounted at $mount_point"
|
||||
log "Current RAID details:"
|
||||
mdadm --detail "$array_path"
|
||||
|
||||
# Check if resyncing
|
||||
if grep -q "resync" /proc/mdstat; then
|
||||
log "RAID is currently resyncing:"
|
||||
grep resync /proc/mdstat
|
||||
log "You can monitor progress with: watch -n 60 cat /proc/mdstat"
|
||||
else
|
||||
log "RAID is fully synced and operational"
|
||||
fi
|
||||
|
||||
# Show disk usage
|
||||
log "Current disk usage:"
|
||||
df -h "$mount_point"
|
||||
|
||||
exit 0
|
||||
fi
|
||||
else
|
||||
# /nsm not mounted, mount the RAID array
|
||||
log "Mounting RAID array $array_path at $mount_point"
|
||||
mount "$array_path" "$mount_point"
|
||||
|
||||
# Update fstab
|
||||
log "Updating fstab for RAID array"
|
||||
sed -i "\|${array_path}|d" /etc/fstab
|
||||
echo "${array_path} ${mount_point} xfs defaults,nofail 0 0" >> /etc/fstab
|
||||
|
||||
log "RAID array is now mounted at $mount_point"
|
||||
log "Current RAID details:"
|
||||
mdadm --detail "$array_path"
|
||||
|
||||
# Show disk usage
|
||||
log "Current disk usage:"
|
||||
df -h "$mount_point"
|
||||
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Check if any of the target devices are in use
|
||||
for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
|
||||
if mdadm --examine "$device" &>/dev/null || mdadm --examine "${device}p1" &>/dev/null; then
|
||||
# Find the actual array name for this device
|
||||
local device_arrays=($(find_md_arrays_using_devices "${device}p1"))
|
||||
local array_name=""
|
||||
|
||||
if [ ${#device_arrays[@]} -gt 0 ]; then
|
||||
array_name="${device_arrays[0]}"
|
||||
else
|
||||
# Fallback: try to find array name from /proc/mdstat
|
||||
local partition_name="${device##*/}p1"
|
||||
array_name=$(grep -l "$partition_name" /proc/mdstat 2>/dev/null | head -1)
|
||||
if [ -n "$array_name" ]; then
|
||||
array_name=$(grep "^md[0-9]" /proc/mdstat | grep "$partition_name" | awk '{print "/dev/" $1}' | head -1)
|
||||
fi
|
||||
# Final fallback
|
||||
if [ -z "$array_name" ]; then
|
||||
array_name="$RAID_DEVICE"
|
||||
fi
|
||||
fi
|
||||
|
||||
log "Error: $device appears to be part of an existing RAID array"
|
||||
log "To reuse this device, you must first:"
|
||||
log "1. Unmount any filesystems"
|
||||
log "2. Stop the RAID array: mdadm --stop $array_name"
|
||||
log "3. Zero the superblock: mdadm --zero-superblock ${device}p1"
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
# Function to ensure devices are not in use
|
||||
ensure_devices_free() {
|
||||
local device=$1
|
||||
|
||||
log "Cleaning up device $device"
|
||||
|
||||
# Kill any processes using the device
|
||||
fuser -k "${device}"* 2>/dev/null || true
|
||||
|
||||
# Force unmount any partitions
|
||||
for part in "${device}"*; do
|
||||
if mount | grep -q "$part"; then
|
||||
umount -f "$part" 2>/dev/null || true
|
||||
fi
|
||||
done
|
||||
|
||||
# Stop any MD arrays using this device
|
||||
for md in $(ls /dev/md* 2>/dev/null || true); do
|
||||
if mdadm --detail "$md" 2>/dev/null | grep -q "$device"; then
|
||||
mdadm --stop "$md" 2>/dev/null || true
|
||||
fi
|
||||
done
|
||||
|
||||
# Clear MD superblock
|
||||
mdadm --zero-superblock "${device}"* 2>/dev/null || true
|
||||
|
||||
# Remove LVM PV if exists
|
||||
pvremove -ff -y "$device" 2>/dev/null || true
|
||||
|
||||
# Clear all signatures
|
||||
wipefs -af "$device" 2>/dev/null || true
|
||||
|
||||
# Delete partition table
|
||||
dd if=/dev/zero of="$device" bs=512 count=2048 2>/dev/null || true
|
||||
dd if=/dev/zero of="$device" bs=512 seek=$(( $(blockdev --getsz "$device") - 2048 )) count=2048 2>/dev/null || true
|
||||
|
||||
# Force kernel to reread
|
||||
blockdev --rereadpt "$device" 2>/dev/null || true
|
||||
partprobe -s "$device" 2>/dev/null || true
|
||||
sleep 2
|
||||
}
|
||||
|
||||
# Main script
|
||||
main() {
|
||||
log "Starting RAID setup script"
|
||||
|
||||
# Check if running as root
|
||||
check_root
|
||||
|
||||
# Check for existing RAID setup
|
||||
check_existing_raid
|
||||
|
||||
# Clean up any existing MD arrays
|
||||
log "Cleaning up existing MD arrays"
|
||||
mdadm --stop --scan 2>/dev/null || true
|
||||
|
||||
# Clear mdadm configuration
|
||||
log "Clearing mdadm configuration"
|
||||
echo "DEVICE partitions" > /etc/mdadm.conf
|
||||
|
||||
# Clean and prepare devices
|
||||
for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
|
||||
ensure_devices_free "$device"
|
||||
|
||||
log "Creating new partition table on $device"
|
||||
sgdisk -Z "$device"
|
||||
sgdisk -o "$device"
|
||||
|
||||
log "Creating RAID partition"
|
||||
sgdisk -n 1:0:0 -t 1:fd00 "$device"
|
||||
|
||||
partprobe "$device"
|
||||
udevadm settle
|
||||
sleep 5
|
||||
done
|
||||
|
||||
log "Final verification of partition availability"
|
||||
if ! [ -b "/dev/nvme0n1p1" ] || ! [ -b "/dev/nvme1n1p1" ]; then
|
||||
log "Error: Partitions not available after creation"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "Creating RAID array"
|
||||
mdadm --create "$RAID_DEVICE" --level=1 --raid-devices=2 \
|
||||
--metadata=1.2 \
|
||||
/dev/nvme0n1p1 /dev/nvme1n1p1 \
|
||||
--force --run
|
||||
|
||||
log "Creating XFS filesystem"
|
||||
mkfs.xfs -f "$RAID_DEVICE"
|
||||
|
||||
log "Creating mount point"
|
||||
mkdir -p /nsm
|
||||
|
||||
log "Updating fstab"
|
||||
sed -i "\|${RAID_DEVICE}|d" /etc/fstab
|
||||
echo "${RAID_DEVICE} ${MOUNT_POINT} xfs defaults,nofail 0 0" >> /etc/fstab
|
||||
|
||||
log "Reloading systemd daemon"
|
||||
systemctl daemon-reload
|
||||
|
||||
log "Mounting filesystem"
|
||||
mount -a
|
||||
|
||||
log "Saving RAID configuration"
|
||||
mdadm --detail --scan > /etc/mdadm.conf
|
||||
|
||||
log "RAID setup complete"
|
||||
log "RAID array details:"
|
||||
mdadm --detail "$RAID_DEVICE"
|
||||
|
||||
if grep -q "resync" /proc/mdstat; then
|
||||
log "RAID is currently resyncing. You can monitor progress with:"
|
||||
log "watch -n 60 cat /proc/mdstat"
|
||||
fi
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
424
salt/hypervisor/tools/sbin/so-qcow2-network-predictable
Normal file
424
salt/hypervisor/tools/sbin/so-qcow2-network-predictable
Normal file
@@ -0,0 +1,424 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
"""
|
||||
Script for configuring network interface predictability in Security Onion VMs.
|
||||
This script modifies the necessary files to ensure consistent network interface naming.
|
||||
|
||||
The script performs the following operations:
|
||||
1. Modifies the BLS entry to set net.ifnames=1
|
||||
2. Removes any existing persistent network rules
|
||||
3. Updates GRUB configuration
|
||||
|
||||
**Usage:**
|
||||
so-qcow2-network-predictable -n <domain_name> [-I <qcow2_image_path>]
|
||||
|
||||
**Options:**
|
||||
-n, --name Domain name of the VM to configure
|
||||
-I, --image (Optional) Path to the QCOW2 image. If not provided,
|
||||
defaults to /nsm/libvirt/images/<domain_name>/<domain_name>.qcow2
|
||||
|
||||
**Examples:**
|
||||
|
||||
1. **Configure using domain name:**
|
||||
```bash
|
||||
so-qcow2-network-predictable -n sool9
|
||||
```
|
||||
This command will:
|
||||
- Use default image path: /nsm/libvirt/images/sool9/sool9.qcow2
|
||||
- Configure network interface predictability
|
||||
|
||||
2. **Configure using custom image path:**
|
||||
```bash
|
||||
so-qcow2-network-predictable -n sool9 -I /path/to/custom/image.qcow2
|
||||
```
|
||||
This command will:
|
||||
- Use the specified image path
|
||||
- Configure network interface predictability
|
||||
|
||||
**Notes:**
|
||||
- The VM must not be running when executing this script
|
||||
- Requires root privileges
|
||||
- Will automatically find and modify the appropriate BLS entry
|
||||
- Removes /etc/udev/rules.d/70-persistent-net.rules if it exists
|
||||
- Updates GRUB configuration after changes
|
||||
|
||||
**Exit Codes:**
|
||||
- 0: Success
|
||||
- 1: General error (invalid arguments, file operations, etc.)
|
||||
- 2: VM is running
|
||||
- 3: Required files not found
|
||||
- 4: Permission denied
|
||||
|
||||
**Logging:**
|
||||
- Logs are written to /opt/so/log/hypervisor/so-qcow2-network-predictable.log
|
||||
- Both file and console logging are enabled
|
||||
- Log entries include:
|
||||
- Timestamps
|
||||
- Operation details
|
||||
- Error messages
|
||||
- Configuration changes
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import guestfs
|
||||
import glob
|
||||
import libvirt
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from so_logging_utils import setup_logging
|
||||
|
||||
# Set up logging
# Module-level logger shared by every function below. setup_logging is a
# project helper (so_logging_utils); per the module docstring it enables both
# file and console handlers -- TODO confirm against so_logging_utils.
logger = setup_logging(
    logger_name='so-qcow2-network-predictable',
    log_file_path='/opt/so/log/hypervisor/so-qcow2-network-predictable.log',
    log_level=logging.INFO,
    format_str='%(asctime)s - %(levelname)s - %(message)s'
)
|
||||
|
||||
def check_domain_status(domain_name):
    """
    Check if the specified domain exists and is not running.

    Args:
        domain_name (str): Name of the libvirt domain to check

    Returns:
        bool: True if domain exists and is not running, False otherwise

    Raises:
        RuntimeError: If domain is running, missing, or the libvirt
            connection fails.
    """
    try:
        connection = libvirt.open('qemu:///system')
        try:
            domain = connection.lookupByName(domain_name)
            if domain.isActive():
                logger.error(f"Domain '{domain_name}' is running - cannot modify configuration")
                raise RuntimeError(f"Domain '{domain_name}' must not be running")
            logger.info(f"Domain '{domain_name}' exists and is not running")
            return True
        except libvirt.libvirtError as err:
            # Translate "missing domain" into a RuntimeError the caller maps
            # to a dedicated exit code; re-raise anything else.
            if "no domain with matching name" in str(err):
                logger.error(f"Domain '{domain_name}' not found")
                raise RuntimeError(f"Domain '{domain_name}' not found")
            raise
        finally:
            connection.close()
    except libvirt.libvirtError as err:
        logger.error(f"Failed to connect to libvirt: {err}")
        raise RuntimeError(f"Failed to connect to libvirt: {err}")
|
||||
|
||||
def modify_bls_entry(g):
    """
    Find and modify the BLS entry to set net.ifnames=1.

    Args:
        g: Mounted guestfs handle

    Returns:
        bool: True if successful, False if no changes needed

    Raises:
        RuntimeError: If BLS entry cannot be found or modified
    """
    bls_dir = "/boot/loader/entries"
    logger.info(f"Checking BLS directory: {bls_dir}")
    if g.is_dir(bls_dir):
        logger.info("BLS directory exists")
    else:
        # Directory missing: dump what *is* under /boot to aid debugging,
        # then fail -- there is nothing to modify without BLS entries.
        logger.info("Listing /boot contents:")
        try:
            boot_contents = g.ls("/boot")
            logger.info(f"/boot contains: {boot_contents}")
            if g.is_dir("/boot/loader"):
                logger.info("Listing /boot/loader contents:")
                loader_contents = g.ls("/boot/loader")
                logger.info(f"/boot/loader contains: {loader_contents}")
        except Exception as e:
            logger.error(f"Error listing /boot contents: {e}")
        raise RuntimeError(f"BLS directory not found: {bls_dir}")

    # Find BLS entry file
    entries = g.glob_expand(f"{bls_dir}/*.conf")
    logger.info(f"Found BLS entries: {entries}")
    if not entries:
        logger.error("No BLS entry files found")
        raise RuntimeError("No BLS entry files found")

    # Use the first entry found
    # NOTE(review): if multiple BLS entries exist only the first is modified
    # -- confirm this is intended for multi-kernel guests.
    bls_file = entries[0]
    logger.info(f"Found BLS entry file: {bls_file}")

    try:
        logger.info(f"Reading BLS file contents from: {bls_file}")
        # read_file returns bytes; BLS entries are expected to be UTF-8 text.
        content = g.read_file(bls_file).decode('utf-8')
        logger.info("Current BLS file content:")
        logger.info("---BEGIN BLS CONTENT---")
        logger.info(content)
        logger.info("---END BLS CONTENT---")

        lines = content.splitlines()
        modified = False

        for i, line in enumerate(lines):
            if line.startswith('options '):
                logger.info(f"Found options line: {line}")

                # First remove any existing net.ifnames parameters (both =0 and =1)
                new_line = re.sub(r'\s*net\.ifnames=[01]\s*', ' ', line)
                # Also remove any quoted versions
                new_line = re.sub(r'\s*"net\.ifnames=[01]"\s*', ' ', new_line)
                # Clean up multiple spaces
                # (this also normalizes whitespace, so an already-correct line
                # with odd spacing still counts as "modified")
                new_line = re.sub(r'\s+', ' ', new_line).strip()

                # Now add net.ifnames=1 at the end
                new_line = f"{new_line} net.ifnames=1"

                if new_line != line:
                    lines[i] = new_line
                    modified = True
                    logger.info(f"Updated options line. New line: {new_line}")
                # Only the first 'options ' line is processed.
                break

        if modified:
            # splitlines() dropped the trailing newline; restore it on write.
            new_content = '\n'.join(lines) + '\n'
            logger.info("New BLS file content:")
            logger.info("---BEGIN NEW BLS CONTENT---")
            logger.info(new_content)
            logger.info("---END NEW BLS CONTENT---")
            g.write(bls_file, new_content.encode('utf-8'))
            logger.info("Successfully updated BLS entry")
            return True

        logger.info("No changes needed for BLS entry")
        return False

    except Exception as e:
        logger.error(f"Failed to modify BLS entry: {e}")
        raise RuntimeError(f"Failed to modify BLS entry: {e}")
|
||||
|
||||
def remove_persistent_net_rules(g):
    """
    Remove the persistent network rules file if it exists.

    Args:
        g: Mounted guestfs handle

    Returns:
        bool: True if file was removed, False if it didn't exist

    Raises:
        RuntimeError: If the removal attempt fails.
    """
    rules_file = "/etc/udev/rules.d/70-persistent-net.rules"
    logger.info(f"Checking for persistent network rules file: {rules_file}")
    try:
        # Guard clause: nothing to do when the file is absent.
        if not g.is_file(rules_file):
            logger.info("No persistent network rules file found")
            return False
        logger.info("Found persistent network rules file, removing...")
        g.rm(rules_file)
        logger.info(f"Successfully removed persistent network rules file: {rules_file}")
        return True
    except Exception as err:
        logger.error(f"Failed to remove persistent network rules: {err}")
        raise RuntimeError(f"Failed to remove persistent network rules: {err}")
|
||||
|
||||
def update_grub_config(g):
    """
    Update GRUB configuration.

    Rewrites the guest's kernelopts in grubenv, refreshes every installed
    kernel's arguments via grubby, and regenerates grub.cfg so net.ifnames=1
    takes effect on the next boot.

    Args:
        g: Mounted guestfs handle

    Raises:
        RuntimeError: If GRUB update fails
    """
    try:
        # First, read the current grubenv to get the existing kernelopts
        logger.info("Reading current grubenv...")
        grubenv_content = g.read_file('/boot/grub2/grubenv').decode('utf-8')
        logger.info("Current grubenv content:")
        logger.info(grubenv_content)

        # Extract current kernelopts
        kernelopts_match = re.search(r'^kernelopts="([^"]+)"', grubenv_content, re.MULTILINE)
        if kernelopts_match:
            current_kernelopts = kernelopts_match.group(1)
            logger.info(f"Current kernelopts: {current_kernelopts}")

            # Remove any existing net.ifnames parameters
            new_kernelopts = re.sub(r'\s*net\.ifnames=[01]\s*', ' ', current_kernelopts)
            # Clean up multiple spaces
            new_kernelopts = re.sub(r'\s+', ' ', new_kernelopts).strip()
            # Add net.ifnames=1
            new_kernelopts = f"{new_kernelopts} net.ifnames=1"

            logger.info(f"New kernelopts: {new_kernelopts}")

            # Update grubenv with the new kernelopts.
            # g.command() runs the named binary from the guest's own
            # filesystem inside the libguestfs appliance.
            logger.info("Setting kernelopts with net.ifnames=1...")
            output_editenv = g.command(['grub2-editenv', '-', 'set', f'kernelopts={new_kernelopts}'])
            logger.info("grub2-editenv output:")
            logger.info(output_editenv)
        else:
            # If we can't find existing kernelopts, use the default
            # NOTE(review): this hard-coded default presumably mirrors the
            # golden-image kernel command line -- confirm it stays in sync.
            logger.warning("Could not find existing kernelopts, using default")
            output_editenv = g.command(['grub2-editenv', '-', 'set', 'kernelopts=console=tty0 no_timer_check biosdevname=0 resume=/dev/mapper/vg_main-lv_swap rd.lvm.lv=vg_main/lv_root rd.lvm.lv=vg_main/lv_swap net.ifnames=1 crashkernel=1G-64G:448M,64G-:512M'])
            logger.info("grub2-editenv output:")
            logger.info(output_editenv)

        logger.info("Updating grubby with net.ifnames=1...")
        # First remove any existing net.ifnames arguments
        output_grubby_remove = g.command(['grubby', '--update-kernel=ALL', '--remove-args=net.ifnames=0 net.ifnames=1'])
        logger.info("grubby remove output:")
        logger.info(output_grubby_remove)

        # Then add net.ifnames=1
        output_grubby_add = g.command(['grubby', '--update-kernel=ALL', '--args=net.ifnames=1'])
        logger.info("grubby add output:")
        logger.info(output_grubby_add)

        # Regenerate grub.cfg so the BLS/grubenv edits are picked up.
        logger.info("Updating GRUB configuration...")
        output_mkconfig = g.command(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])
        logger.info("GRUB update output:")
        logger.info(output_mkconfig)
        logger.info("Successfully updated GRUB configuration")
    except Exception as e:
        logger.error(f"Failed to update GRUB configuration: {e}")
        raise RuntimeError(f"Failed to update GRUB configuration: {e}")
|
||||
|
||||
def configure_network_predictability(domain_name, image_path=None):
    """
    Configure network interface predictability for a VM.

    Validates the domain is defined and shut off, then opens its QCOW2 image
    offline with libguestfs to set net.ifnames=1 and remove stale
    persistent-net udev rules.

    Args:
        domain_name (str): Name of the domain to configure
        image_path (str, optional): Path to the QCOW2 image; defaults to
            /nsm/libvirt/images/<domain_name>/<domain_name>.qcow2

    Raises:
        RuntimeError: If configuration fails
    """
    # Check domain status.
    # Raises if the domain is missing or running; main() maps these message
    # texts to exit codes, so they must stay stable.
    check_domain_status(domain_name)

    # Use default image path if none provided
    if not image_path:
        image_path = f"/nsm/libvirt/images/{domain_name}/{domain_name}.qcow2"

    if not os.path.exists(image_path):
        logger.error(f"Image file not found: {image_path}")
        raise RuntimeError(f"Image file not found: {image_path}")

    if not os.access(image_path, os.R_OK | os.W_OK):
        logger.error(f"Permission denied: Cannot access image file {image_path}")
        raise RuntimeError(f"Permission denied: Cannot access image file {image_path}")

    logger.info(f"Configuring network predictability for domain: {domain_name}")
    logger.info(f"Using image: {image_path}")

    g = guestfs.GuestFS(python_return_dict=True)
    try:
        logger.info("Initializing guestfs...")
        g.set_network(False)  # the appliance needs no network access
        # NOTE(review): guestfs exposes set_selinux(); a plain attribute
        # assignment here may be a no-op -- confirm the intended effect.
        g.selinux = False
        g.add_drive_opts(image_path, format="qcow2")
        g.launch()

        logger.info("Inspecting operating system...")
        roots = g.inspect_os()
        if not roots:
            raise RuntimeError("No operating system found in image")

        # Only the first detected root filesystem is configured.
        root = roots[0]
        logger.info(f"Found root filesystem: {root}")
        logger.info(f"Operating system type: {g.inspect_get_type(root)}")
        logger.info(f"Operating system distro: {g.inspect_get_distro(root)}")
        logger.info(f"Operating system major version: {g.inspect_get_major_version(root)}")
        logger.info(f"Operating system minor version: {g.inspect_get_minor_version(root)}")

        logger.info("Getting mount points...")
        mountpoints = g.inspect_get_mountpoints(root)
        logger.info(f"Found mount points: {mountpoints}")
        logger.info("Converting mount points to sortable list...")
        # Convert dictionary to list of (mountpoint, device) tuples
        mountpoints = list(mountpoints.items())
        logger.info(f"Converted mount points: {mountpoints}")
        logger.info("Sorting mount points by path length for proper mount order...")
        # Shorter paths first so parents ('/') mount before children ('/boot').
        mountpoints.sort(key=lambda m: len(m[0]))
        logger.info(f"Mount order will be: {[mp[0] for mp in mountpoints]}")

        for mp_path, mp_device in mountpoints:
            try:
                logger.info(f"Attempting to mount {mp_device} at {mp_path}")
                g.mount(mp_device, mp_path)
                logger.info(f"Successfully mounted {mp_device} at {mp_path}")
            except Exception as e:
                # Best-effort: a failed secondary mount must not abort the run.
                logger.warning(f"Could not mount {mp_device} at {mp_path}: {str(e)}")
                # Continue with other mounts

        # Perform configuration steps
        bls_modified = modify_bls_entry(g)
        rules_removed = remove_persistent_net_rules(g)

        # Regenerate GRUB only when something actually changed.
        if bls_modified or rules_removed:
            update_grub_config(g)
            logger.info("Network predictability configuration completed successfully")
        else:
            logger.info("No changes were necessary")

    except Exception as e:
        raise RuntimeError(f"Failed to configure network predictability: {e}")
    finally:
        # Always unmount and close the handle, even on failure.
        try:
            logger.info("Unmounting all filesystems...")
            g.umount_all()
            logger.info("Successfully unmounted all filesystems")
        except Exception as e:
            logger.warning(f"Error unmounting filesystems: {e}")
        g.close()
|
||||
|
||||
def parse_arguments(argv=None):
    """
    Parse command line arguments.

    Args:
        argv (list[str], optional): Argument list to parse. Defaults to
            None, in which case argparse reads sys.argv[1:] (the original
            behavior). Accepting an explicit list makes the parser testable
            without patching sys.argv.

    Returns:
        argparse.Namespace: Parsed arguments with `name` (required) and
        `image` (optional, None when not supplied).
    """
    parser = argparse.ArgumentParser(
        description="Configure network interface predictability for Security Onion VMs"
    )
    parser.add_argument("-n", "--name", required=True,
                        help="Domain name of the VM to configure")
    parser.add_argument("-I", "--image",
                        help="Path to the QCOW2 image (optional)")
    return parser.parse_args(argv)
|
||||
|
||||
def main():
    """Main entry point for the script."""
    try:
        args = parse_arguments()
        configure_network_predictability(args.name, args.image)
        sys.exit(0)
    except RuntimeError as e:
        # Map known failure messages onto the documented exit codes.
        message = str(e)
        logger.error(message)
        if "must not be running" in message:
            sys.exit(2)
        if "not found" in message:
            sys.exit(3)
        if "Permission denied" in message:
            sys.exit(4)
        sys.exit(1)
    except KeyboardInterrupt:
        logger.error("Operation cancelled by user")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
206
salt/hypervisor/tools/sbin/so-wait-cloud-init
Normal file
206
salt/hypervisor/tools/sbin/so-wait-cloud-init
Normal file
@@ -0,0 +1,206 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
"""
|
||||
Script for waiting for cloud-init to complete on a Security Onion VM.
|
||||
Monitors VM state to ensure proper cloud-init initialization and shutdown.
|
||||
|
||||
**Usage:**
|
||||
so-wait-cloud-init -n <domain_name>
|
||||
|
||||
**Options:**
|
||||
-n, --name Domain name of the VM to monitor
|
||||
|
||||
**Exit Codes:**
|
||||
- 0: Success (cloud-init completed and VM shutdown)
|
||||
- 1: General error
|
||||
- 2: VM never started
|
||||
- 3: VM stopped too quickly
|
||||
- 4: VM failed to shutdown
|
||||
|
||||
**Description:**
|
||||
This script monitors a VM's state to ensure proper cloud-init initialization and completion:
|
||||
1. Waits for VM to start running
|
||||
2. Verifies VM remains running (not an immediate crash)
|
||||
3. Waits for VM to shutdown (indicating cloud-init completion)
|
||||
4. Verifies VM remains shutdown
|
||||
|
||||
The script is typically used in the libvirt.images state after creating a new VM
|
||||
to ensure cloud-init completes its initialization before proceeding with further
|
||||
configuration.
|
||||
|
||||
**Logging:**
|
||||
- Logs are written to /opt/so/log/hypervisor/so-wait-cloud-init.log
|
||||
- Both file and console logging are enabled
|
||||
- Log entries include:
|
||||
- Timestamps
|
||||
- State changes
|
||||
- Error conditions
|
||||
- Verification steps
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import logging
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
from so_logging_utils import setup_logging
|
||||
|
||||
# Set up logging
# Module-level logger shared by every function below. setup_logging is a
# project helper (so_logging_utils); per the module docstring it enables both
# file and console handlers -- TODO confirm against so_logging_utils.
logger = setup_logging(
    logger_name='so-wait-cloud-init',
    log_file_path='/opt/so/log/hypervisor/so-wait-cloud-init.log',
    log_level=logging.INFO,
    format_str='%(asctime)s - %(levelname)s - %(message)s'
)
|
||||
|
||||
def check_vm_running(domain_name):
    """
    Check if VM is in running state.

    Args:
        domain_name (str): Name of the domain to check

    Returns:
        bool: True if VM is running, False otherwise (including when the
            virsh query itself fails).
    """
    try:
        # Ask virsh for the names of all running domains.
        listing = subprocess.run(['virsh', 'list', '--state-running', '--name'],
                                 capture_output=True, text=True, check=True)
    except subprocess.CalledProcessError as err:
        logger.error(f"Failed to check VM state: {err}")
        return False
    return domain_name in listing.stdout.splitlines()
|
||||
|
||||
def wait_for_vm_start(domain_name, timeout=300):
    """
    Wait for VM to start running.

    Args:
        domain_name (str): Name of the domain to monitor
        timeout (int): Maximum time to wait in seconds

    Returns:
        bool: True if VM started, False if timeout occurred
    """
    logger.info(f"Waiting for VM {domain_name} to start...")
    deadline = time.time() + timeout

    # Poll once per second until the domain shows up as running.
    while time.time() < deadline:
        if check_vm_running(domain_name):
            logger.info("VM is running")
            return True
        time.sleep(1)

    logger.error(f"Timeout waiting for VM {domain_name} to start")
    return False
|
||||
|
||||
def verify_vm_running(domain_name):
    """
    Verify VM remains running after initial start.

    Args:
        domain_name (str): Name of the domain to verify

    Returns:
        bool: True if VM is still running after verification period
    """
    logger.info("Verifying VM remains running...")
    # Brief settling delay: an immediate crash shows up in this window.
    time.sleep(5)

    if check_vm_running(domain_name):
        logger.info("VM verified running")
        return True

    logger.error("VM stopped too quickly after starting")
    return False
|
||||
|
||||
def wait_for_vm_shutdown(domain_name, timeout=600):
    """
    Wait for VM to shutdown.

    A clean shutdown signals that cloud-init finished its run inside the
    guest.

    Args:
        domain_name (str): Name of the domain to monitor
        timeout (int): Maximum time to wait in seconds

    Returns:
        bool: True if VM shutdown, False if timeout occurred
    """
    logger.info("Waiting for cloud-init to complete and VM to shutdown...")
    start_time = time.time()
    polls = 0

    while time.time() - start_time < timeout:
        if not check_vm_running(domain_name):
            logger.info("VM has shutdown")
            return True

        polls += 1
        # Emit one progress line per minute (12 polls at 5-second intervals).
        if polls % 12 == 0:
            elapsed = int(time.time() - start_time)
            logger.info(f"Still waiting for cloud-init... ({elapsed} seconds elapsed)")

        time.sleep(5)

    logger.error(f"Timeout waiting for VM {domain_name} to shutdown")
    return False
|
||||
|
||||
def verify_vm_shutdown(domain_name):
    """
    Verify VM remains shutdown.

    Args:
        domain_name (str): Name of the domain to verify

    Returns:
        bool: True if VM remains shutdown after verification period
    """
    logger.info("Verifying VM remains shutdown...")
    # Brief settling delay so a restarting domain is caught.
    time.sleep(5)

    if not check_vm_running(domain_name):
        logger.info("VM verified shutdown")
        return True

    logger.error("VM is still running after shutdown check")
    return False
|
||||
|
||||
def main():
    """Drive the cloud-init wait sequence and report the result via exit code."""
    parser = argparse.ArgumentParser(
        description="Wait for cloud-init to complete on a Security Onion VM"
    )
    parser.add_argument("-n", "--name", required=True,
                        help="Domain name of the VM to monitor")
    args = parser.parse_args()

    try:
        # Each stage gates the next; a failure exits with its documented code.
        if not wait_for_vm_start(args.name):
            sys.exit(2)  # VM never started
        if not verify_vm_running(args.name):
            sys.exit(3)  # VM stopped too quickly
        if not wait_for_vm_shutdown(args.name):
            sys.exit(4)  # VM failed to shutdown
        if not verify_vm_shutdown(args.name):
            sys.exit(4)  # VM failed to stay shutdown

        logger.info("Cloud-init completed successfully")
        sys.exit(0)

    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
|
||||
60
salt/hypervisor/tools/sbin/so_vm_utils.py
Normal file
60
salt/hypervisor/tools/sbin/so_vm_utils.py
Normal file
@@ -0,0 +1,60 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
import sys
|
||||
import time
|
||||
import libvirt
|
||||
import logging
|
||||
|
||||
def stop_vm(conn, vm_name, logger, timeout=None):
    """
    Stops the specified virtual machine if it is running.

    Sends a graceful (ACPI) shutdown request and polls once per second
    until the domain reports inactive.

    Parameters:
        conn (libvirt.virConnect): The libvirt connection object.
        vm_name (str): The name of the virtual machine.
        logger (logging.Logger): The logger object.
        timeout (float, optional): Maximum seconds to wait for the guest to
            power off. None (the default) waits indefinitely, matching the
            original behavior; pass a number to avoid hanging forever on a
            guest that ignores ACPI shutdown.

    Returns:
        libvirt.virDomain: The domain object of the VM.

    Raises:
        SystemExit: If the VM cannot be found, a libvirt error occurs, or
            the shutdown does not complete within `timeout` seconds.
    """
    try:
        dom = conn.lookupByName(vm_name)
        if dom.isActive():
            logger.info(f"Shutting down VM '{vm_name}'...")
            # Graceful request only; the guest must honor ACPI for this to
            # complete.
            dom.shutdown()
            deadline = None if timeout is None else time.time() + timeout
            # Wait for the VM to shut down (1-second poll).
            while dom.isActive():
                if deadline is not None and time.time() > deadline:
                    logger.error(f"Timed out waiting for VM '{vm_name}' to stop.")
                    sys.exit(1)
                time.sleep(1)
            logger.info(f"VM '{vm_name}' has been stopped.")
        else:
            logger.info(f"VM '{vm_name}' is already stopped.")
        return dom
    except libvirt.libvirtError as e:
        logger.error(f"Failed to stop VM '{vm_name}': {e}")
        sys.exit(1)
|
||||
|
||||
def start_vm(dom, logger):
    """
    Starts the specified virtual machine.

    Parameters:
        dom (libvirt.virDomain): The domain object of the VM.
        logger (logging.Logger): The logger object.

    Raises:
        SystemExit: If the VM cannot be started.
    """
    try:
        # create() boots a defined-but-inactive domain.
        dom.create()
        logger.info(f"VM '{dom.name()}' started successfully.")
    except libvirt.libvirtError as err:
        logger.error(f"Failed to start VM '{dom.name()}': {err}")
        sys.exit(1)
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user