Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2025-12-06 09:12:45 +01:00

Compare commits: 2.4.160-20 ... c372cd533d (873 commits)
[Commit list: 873 commits, from c372cd533d (newest) back to 8e4777a5ff (oldest); the Author, Date, and message columns are not shown, only the abbreviated commit SHA1s.]
.github/.gitleaks.toml (vendored, 3 changes)

@@ -541,5 +541,6 @@ paths = [
     '''gitleaks.toml''',
     '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
     '''(go.mod|go.sum)$''',
-    '''salt/nginx/files/enterprise-attack.json'''
+    '''salt/nginx/files/enterprise-attack.json''',
+    '''(.*?)whl$'''
 ]
.github/DISCUSSION_TEMPLATE/2-4.yml (vendored, 4 changes)

@@ -29,6 +29,10 @@ body:
         - 2.4.141
         - 2.4.150
         - 2.4.160
+        - 2.4.170
+        - 2.4.180
+        - 2.4.190
+        - 2.4.200
         - Other (please provide detail below)
     validations:
       required: true
||||
2
.github/workflows/pythontest.yml
vendored
2
.github/workflows/pythontest.yml
vendored
@@ -4,7 +4,7 @@ on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "salt/sensoroni/files/analyzers/**"
|
||||
- "salt/manager/tools/sbin"
|
||||
- "salt/manager/tools/sbin/**"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,4 +1,3 @@
|
||||
|
||||
# Created by https://www.gitignore.io/api/macos,windows
|
||||
# Edit at https://www.gitignore.io/?templates=macos,windows
|
||||
|
||||
@@ -67,4 +66,4 @@ __pycache__
|
||||
|
||||
# Analyzer dev/test config files
|
||||
*_dev.yaml
|
||||
site-packages
|
||||
site-packages
|
||||
|
||||
@@ -1,17 +1,17 @@
-### 2.4.160-20250625 ISO image released on 2025/06/25
+### 2.4.190-20251024 ISO image released on 2025/10/24


 ### Download and Verify

-2.4.160-20250625 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.160-20250625.iso
+2.4.190-20251024 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso

-MD5: 78CF5602EFFAB84174C56AD2826E6E4E
-SHA1: FC7EEC3EC95D97D3337501BAA7CA8CAE7C0E15EA
-SHA256: 0ED965E8BEC80EE16AE90A0F0F96A3046CEF2D92720A587278DDDE3B656C01C2
+MD5: 25358481FB876226499C011FC0710358
+SHA1: 0B26173C0CE136F2CA40A15046D1DFB78BCA1165
+SHA256: 4FD9F62EDA672408828B3C0C446FE5EA9FF3C4EE8488A7AB1101544A3C487872

 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.160-20250625.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig

 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.

 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.160-20250625.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
 ```

 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.160-20250625.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
 ```

 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.160-20250625.iso.sig securityonion-2.4.160-20250625.iso
+gpg --verify securityonion-2.4.190-20251024.iso.sig securityonion-2.4.190-20251024.iso
 ```

 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Wed 25 Jun 2025 10:13:33 AM EDT using RSA key ID FE507013
+gpg: Signature made Thu 23 Oct 2025 07:21:46 AM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 ```
pillar/hypervisor/nodes.sls (new file, 34 lines)

@@ -0,0 +1,34 @@
+{% set node_types = {} %}
+{% for minionid, ip in salt.saltutil.runner(
+    'mine.get',
+    tgt='G@role:so-hypervisor or G@role:so-managerhype',
+    fun='network.ip_addrs',
+    tgt_type='compound') | dictsort()
+%}
+
+# only add a node to the pillar if it returned an ip from the mine
+{% if ip | length > 0%}
+  {% set hostname = minionid.split('_') | first %}
+  {% set node_type = minionid.split('_') | last %}
+  {% if node_type not in node_types.keys() %}
+    {% do node_types.update({node_type: {hostname: ip[0]}}) %}
+  {% else %}
+    {% if hostname not in node_types[node_type] %}
+      {% do node_types[node_type].update({hostname: ip[0]}) %}
+    {% else %}
+      {% do node_types[node_type][hostname].update(ip[0]) %}
+    {% endif %}
+  {% endif %}
+{% endif %}
+{% endfor %}
+
+
+hypervisor:
+  nodes:
+{% for node_type, values in node_types.items() %}
+    {{node_type}}:
+{% for hostname, ip in values.items() %}
+      {{hostname}}:
+        ip: {{ip}}
+{% endfor %}
+{% endfor %}
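The loop above groups mine results by the role suffix of each minion ID and keeps only the first returned IP. As a rough illustration of that data shaping (not part of the repo; minion IDs and IPs invented), a Python sketch of the same logic:

```python
# Hypothetical mine.get output: minion ID -> list of IPs
mine = {
    "hv1_hypervisor": ["10.0.0.11"],
    "hv2_hypervisor": ["10.0.0.12"],
    "mgr_managerhype": ["10.0.0.5"],
    "down_hypervisor": [],  # returned no IP from the mine, so it is skipped
}

node_types = {}
for minionid, ips in sorted(mine.items()):  # sorted() mirrors dictsort()
    if len(ips) > 0:  # only add a node if it returned an ip from the mine
        parts = minionid.split("_")
        hostname, node_type = parts[0], parts[-1]  # split | first / split | last
        node_types.setdefault(node_type, {})[hostname] = ips[0]

# Renders as pillar: hypervisor:nodes:<node_type>:<hostname>:ip
print(node_types)
# {'hypervisor': {'hv1': '10.0.0.11', 'hv2': '10.0.0.12'},
#  'managerhype': {'mgr': '10.0.0.5'}}
```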
@@ -18,6 +18,7 @@ base:
     - telegraf.adv_telegraf
     - versionlock.soc_versionlock
     - versionlock.adv_versionlock
+    - soc.license

   '* and not *_desktop':
     - firewall.soc_firewall
@@ -25,7 +26,12 @@
     - nginx.soc_nginx
     - nginx.adv_nginx

-  '*_manager or *_managersearch':
+  'salt-cloud:driver:libvirt':
+    - match: grain
+    - vm.soc_vm
+    - vm.adv_vm
+
+  '*_manager or *_managersearch or *_managerhype':
     - match: compound
     - node_data.ips
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
@@ -37,14 +43,11 @@
     - secrets
     - manager.soc_manager
     - manager.adv_manager
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash
     - soc.soc_soc
     - soc.adv_soc
-    - soc.license
     - kibana.soc_kibana
     - kibana.adv_kibana
     - kratos.soc_kratos
@@ -70,6 +73,9 @@
     - kafka.nodes
     - kafka.soc_kafka
     - kafka.adv_kafka
+    - hypervisor.nodes
+    - hypervisor.soc_hypervisor
+    - hypervisor.adv_hypervisor
     - stig.soc_stig

   '*_sensor':
@@ -87,7 +93,6 @@
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
-    - soc.license

   '*_eval':
     - node_data.ips
@@ -110,11 +115,8 @@
     - elastalert.adv_elastalert
     - manager.soc_manager
     - manager.adv_manager
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - soc.soc_soc
     - soc.adv_soc
-    - soc.license
     - kibana.soc_kibana
     - kibana.adv_kibana
     - strelka.soc_strelka
@@ -152,8 +154,6 @@
 {% endif %}
     - secrets
     - healthcheck.standalone
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - kratos.soc_kratos
     - kratos.adv_kratos
     - hydra.soc_hydra
@@ -174,7 +174,6 @@
     - manager.adv_manager
     - soc.soc_soc
     - soc.adv_soc
-    - soc.license
     - kibana.soc_kibana
     - kibana.adv_kibana
     - strelka.soc_strelka
@@ -240,7 +239,6 @@
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
-    - soc.license
     - kafka.nodes
     - kafka.soc_kafka
     - kafka.adv_kafka
@@ -258,8 +256,9 @@
     - minions.adv_{{ grains.id }}
     - kafka.nodes
     - kafka.soc_kafka
     - kafka.adv_kafka
+    - soc.license
     - stig.soc_stig
     - elasticfleet.soc_elasticfleet
     - elasticfleet.adv_elasticfleet

   '*_import':
     - node_data.ips
@@ -283,7 +282,6 @@
     - manager.adv_manager
     - soc.soc_soc
     - soc.adv_soc
-    - soc.license
     - kibana.soc_kibana
     - kibana.adv_kibana
     - backup.soc_backup
@@ -318,9 +316,15 @@
     - elasticfleet.adv_elasticfleet
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig

+  '*_hypervisor':
+    - minions.{{ grains.id }}
+    - minions.adv_{{ grains.id }}
+    - stig.soc_stig
+
   '*_desktop':
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
+    - soc.license
salt/_modules/hypervisor.py (new file, 91 lines)

@@ -0,0 +1,91 @@
+#!/opt/saltstack/salt/bin/python3
+
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+#
+# Note: Per the Elastic License 2.0, the second limitation states:
+#
+# "You may not move, change, disable, or circumvent the license key functionality
+# in the software, and you may not remove or obscure any functionality in the
+# software that is protected by the license key."
+
+"""
+Salt execution module for hypervisor operations.
+
+This module provides functions for managing hypervisor configurations,
+including VM file management.
+"""
+
+import json
+import logging
+import os
+
+log = logging.getLogger(__name__)
+
+__virtualname__ = 'hypervisor'
+
+
+def __virtual__():
+    """
+    Only load this module if we're on a system that can manage hypervisors.
+    """
+    return __virtualname__
+
+
+def remove_vm_from_vms_file(vms_file_path, vm_hostname, vm_role):
+    """
+    Remove a VM entry from the hypervisorVMs file.
+
+    Args:
+        vms_file_path (str): Path to the hypervisorVMs file
+        vm_hostname (str): Hostname of the VM to remove (without role suffix)
+        vm_role (str): Role of the VM
+
+    Returns:
+        dict: Result dictionary with success status and message
+
+    CLI Example:
+        salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
+    """
+    try:
+        # Check if file exists
+        if not os.path.exists(vms_file_path):
+            msg = f"VMs file not found: {vms_file_path}"
+            log.error(msg)
+            return {'result': False, 'comment': msg}
+
+        # Read current VMs
+        with open(vms_file_path, 'r') as f:
+            content = f.read().strip()
+            vms = json.loads(content) if content else []
+
+        # Find and remove the VM entry
+        original_count = len(vms)
+        vms = [vm for vm in vms if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]
+
+        if len(vms) < original_count:
+            # VM was found and removed, write back to file
+            with open(vms_file_path, 'w') as f:
+                json.dump(vms, f, indent=2)
+
+            # Set socore:socore ownership (939:939)
+            os.chown(vms_file_path, 939, 939)
+
+            msg = f"Removed VM {vm_hostname}_{vm_role} from {vms_file_path}"
+            log.info(msg)
+            return {'result': True, 'comment': msg}
+        else:
+            msg = f"VM {vm_hostname}_{vm_role} not found in {vms_file_path}"
+            log.warning(msg)
+            return {'result': False, 'comment': msg}
+
+    except json.JSONDecodeError as e:
+        msg = f"Failed to parse JSON in {vms_file_path}: {str(e)}"
+        log.error(msg)
+        return {'result': False, 'comment': msg}
+    except Exception as e:
+        msg = f"Failed to remove VM {vm_hostname}_{vm_role} from {vms_file_path}: {str(e)}"
+        log.error(msg)
+        return {'result': False, 'comment': msg}
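The module treats the hypervisorVMs file as a JSON list of objects keyed by hostname and role. A self-contained sketch of that read-filter-write cycle (the file path and entries below are invented for illustration):

```python
import json
import tempfile

# Hypothetical hypervisorVMs content: a JSON list of VM entries
vms = [{"hostname": "node1", "role": "nsm"}, {"hostname": "node2", "role": "sensor"}]

with tempfile.NamedTemporaryFile("w", suffix="VMs", delete=False) as f:
    json.dump(vms, f, indent=2)
    path = f.name

# Same hostname/role filter the module applies
with open(path) as f:
    entries = json.load(f)
entries = [vm for vm in entries
           if not (vm.get("hostname") == "node1" and vm.get("role") == "nsm")]
with open(path, "w") as f:
    json.dump(entries, f, indent=2)  # the module additionally chowns to socore (939:939)

print(entries)  # [{'hostname': 'node2', 'role': 'sensor'}]
```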
salt/_modules/qcow2.py (new file, 335 lines)

@@ -0,0 +1,335 @@
+#!py
+
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+"""
+Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
+for modifying network configurations within QCOW2 images, adjusting virtual machine hardware settings, and
+creating virtual storage volumes. It serves as a Salt interface to the so-qcow2-modify-network,
+so-kvm-modify-hardware, and so-kvm-create-volume scripts.
+
+The module offers three main capabilities:
+1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
+2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
+3. Volume Management: Create and attach virtual storage volumes for NSM data
+
+This module is intended to work with Security Onion's virtualization infrastructure and is typically
+used in conjunction with salt-cloud for VM provisioning and management.
+"""
+
+import logging
+import subprocess
+import shlex
+
+log = logging.getLogger(__name__)
+
+__virtualname__ = 'qcow2'
+
+def __virtual__():
+    return __virtualname__
+
+def modify_network_config(image, interface, mode, vm_name, ip4=None, gw4=None, dns4=None, search4=None):
+    '''
+    Usage:
+        salt '*' qcow2.modify_network_config image=<path> interface=<iface> mode=<mode> vm_name=<name> [ip4=<addr>] [gw4=<addr>] [dns4=<servers>] [search4=<domain>]
+
+    Options:
+        image
+            Path to the QCOW2 image file that will be modified
+        interface
+            Network interface name to configure (e.g., 'enp1s0')
+        mode
+            Network configuration mode, either 'dhcp4' or 'static4'
+        vm_name
+            Full name of the VM (hostname_role)
+        ip4
+            IPv4 address with CIDR notation (e.g., '192.168.1.10/24')
+            Required when mode='static4'
+        gw4
+            IPv4 gateway address (e.g., '192.168.1.1')
+            Required when mode='static4'
+        dns4
+            Comma-separated list of IPv4 DNS servers (e.g., '8.8.8.8,8.8.4.4')
+            Optional for both DHCP and static configurations
+        search4
+            DNS search domain for IPv4 (e.g., 'example.local')
+            Optional for both DHCP and static configurations
+
+    Examples:
+        1. **Configure DHCP:**
+           ```bash
+           salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='dhcp4'
+           ```
+           This configures enp1s0 to use DHCP for IP assignment
+
+        2. **Configure Static IP:**
+           ```bash
+           salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='static4' ip4='192.168.1.10/24' gw4='192.168.1.1' dns4='192.168.1.1,8.8.8.8' search4='example.local'
+           ```
+           This sets a static IP configuration with DNS servers and search domain
+
+    Notes:
+        - The QCOW2 image must be accessible and writable by the salt minion
+        - The image should not be in use by a running VM when modified
+        - Network changes take effect on next VM boot
+        - Requires so-qcow2-modify-network script to be installed
+
+    Description:
+        This function modifies network configuration within a QCOW2 image file by executing
+        the so-qcow2-modify-network script. It supports both DHCP and static IPv4 configuration.
+        The script mounts the image, modifies the network configuration files, and unmounts
+        safely. All operations are logged for troubleshooting purposes.
+
+    Exit Codes:
+        0: Success
+        1: Invalid parameters or configuration
+        2: Image access or mounting error
+        3: Network configuration error
+        4: System command error
+        255: Unexpected error
+
+    Logging:
+        - All operations are logged to the salt minion log
+        - Log entries are prefixed with 'qcow2 module:'
+        - Error conditions include detailed error messages and stack traces
+        - Success/failure status is logged for verification
+    '''
+
+    cmd = ['/usr/sbin/so-qcow2-modify-network', '-I', image, '-i', interface, '-n', vm_name]
+
+    if mode.lower() == 'dhcp4':
+        cmd.append('--dhcp4')
+    elif mode.lower() == 'static4':
+        cmd.append('--static4')
+        if not ip4 or not gw4:
+            raise ValueError('Both ip4 and gw4 are required for static configuration.')
+        cmd.extend(['--ip4', ip4, '--gw4', gw4])
+        if dns4:
+            cmd.extend(['--dns4', dns4])
+        if search4:
+            cmd.extend(['--search4', search4])
+    else:
+        raise ValueError("Invalid mode '{}'. Expected 'dhcp4' or 'static4'.".format(mode))
+
+    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
+
+    try:
+        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
+        ret = {
+            'retcode': result.returncode,
+            'stdout': result.stdout,
+            'stderr': result.stderr
+        }
+        if result.returncode != 0:
+            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
+        else:
+            log.info('qcow2 module: Script executed successfully.')
+        return ret
+    except Exception as e:
+        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
+        raise
+
+def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False):
+    '''
+    Usage:
+        salt '*' qcow2.modify_hardware_config vm_name=<name> [cpu=<count>] [memory=<size>] [pci=<id>] [pci=<id>] [start=<bool>]
+
+    Options:
+        vm_name
+            Name of the virtual machine to modify
+        cpu
+            Number of virtual CPUs to assign (positive integer)
+            Optional - VM's current CPU count retained if not specified
+        memory
+            Amount of memory to assign in MiB (positive integer)
+            Optional - VM's current memory size retained if not specified
+        pci
+            PCI hardware ID(s) to passthrough to the VM (e.g., '0000:c7:00.0')
+            Can be specified multiple times for multiple devices
+            Optional - no PCI passthrough if not specified
+        start
+            Boolean flag to start the VM after modification
+            Optional - defaults to False
+
+    Examples:
+        1. **Modify CPU and Memory:**
+           ```bash
+           salt '*' qcow2.modify_hardware_config vm_name='sensor1' cpu=4 memory=8192
+           ```
+           This assigns 4 CPUs and 8GB memory to the VM
+
+        2. **Enable PCI Passthrough:**
+           ```bash
+           salt '*' qcow2.modify_hardware_config vm_name='sensor1' pci='0000:c7:00.0' pci='0000:c4:00.0' start=True
+           ```
+           This configures PCI passthrough and starts the VM
+
+        3. **Complete Hardware Configuration:**
+           ```bash
+           salt '*' qcow2.modify_hardware_config vm_name='sensor1' cpu=8 memory=16384 pci='0000:c7:00.0' start=True
+           ```
+           This sets CPU, memory, PCI passthrough, and starts the VM
+
+    Notes:
+        - VM must be stopped before modification unless only the start flag is set
+        - Memory is specified in MiB (1024 = 1GB)
+        - PCI devices must be available and not in use by the host
+        - CPU count should align with host capabilities
+        - Requires so-kvm-modify-hardware script to be installed
+
+    Description:
+        This function modifies the hardware configuration of a KVM virtual machine using
+        the so-kvm-modify-hardware script. It can adjust CPU count, memory allocation,
+        and PCI device passthrough. Changes are applied to the VM's libvirt configuration.
+        The VM can optionally be started after modifications are complete.
+
+    Exit Codes:
+        0: Success
+        1: Invalid parameters
+        2: VM state error (running when should be stopped)
+        3: Hardware configuration error
+        4: System command error
+        255: Unexpected error
+
+    Logging:
+        - All operations are logged to the salt minion log
+        - Log entries are prefixed with 'qcow2 module:'
+        - Hardware configuration changes are logged
+        - Errors include detailed messages and stack traces
+        - Final status of modification is logged
+    '''
+
+    cmd = ['/usr/sbin/so-kvm-modify-hardware', '-v', vm_name]
+
+    if cpu is not None:
+        if isinstance(cpu, int) and cpu > 0:
+            cmd.extend(['-c', str(cpu)])
+        else:
+            raise ValueError('cpu must be a positive integer.')
+    if memory is not None:
+        if isinstance(memory, int) and memory > 0:
+            cmd.extend(['-m', str(memory)])
+        else:
+            raise ValueError('memory must be a positive integer.')
+    if pci:
+        # Handle PCI IDs (can be a single device or comma-separated list)
+        if isinstance(pci, str):
+            devices = [dev.strip() for dev in pci.split(',') if dev.strip()]
+        elif isinstance(pci, list):
+            devices = pci
+        else:
+            devices = [pci]
+
+        # Add each device with its own -p flag
+        for device in devices:
+            cmd.extend(['-p', str(device)])
+    if start:
+        cmd.append('-s')
+
+    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
+
+    try:
+        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
+        ret = {
+            'retcode': result.returncode,
+            'stdout': result.stdout,
+            'stderr': result.stderr
+        }
+        if result.returncode != 0:
+            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
+        else:
+            log.info('qcow2 module: Script executed successfully.')
+        return ret
+    except Exception as e:
+        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
+        raise
+
+def create_volume_config(vm_name, size_gb, start=False):
+    '''
+    Usage:
+        salt '*' qcow2.create_volume_config vm_name=<name> size_gb=<size> [start=<bool>]
+
+    Options:
+        vm_name
+            Name of the virtual machine to attach the volume to
+        size_gb
+            Volume size in GB (positive integer)
+            This determines the capacity of the virtual storage volume
+        start
+            Boolean flag to start the VM after volume creation
+            Optional - defaults to False
+
+    Examples:
+        1. **Create 500GB Volume:**
+           ```bash
+           salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=500
+           ```
+           This creates a 500GB virtual volume for NSM storage
+
+        2. **Create 1TB Volume and Start VM:**
+           ```bash
+           salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=1000 start=True
+           ```
+           This creates a 1TB volume and starts the VM after attachment
+
+    Notes:
+        - VM must be stopped before volume creation
+        - Volume is created as a qcow2 image and attached to the VM
+        - This is an alternative to disk passthrough via modify_hardware_config
+        - Volume is automatically attached to the VM's libvirt configuration
+        - Requires so-kvm-create-volume script to be installed
+        - Volume files are stored in the hypervisor's VM storage directory
+
+    Description:
+        This function creates and attaches a virtual storage volume to a KVM virtual machine
+        using the so-kvm-create-volume script. It creates a qcow2 disk image of the specified
+        size and attaches it to the VM for NSM (Network Security Monitoring) storage purposes.
+        This provides an alternative to physical disk passthrough, allowing flexible storage
+        allocation without requiring dedicated hardware. The VM can optionally be started
+        after the volume is successfully created and attached.
+
+    Exit Codes:
+        0: Success
+        1: Invalid parameters
+        2: VM state error (running when should be stopped)
+        3: Volume creation error
+        4: System command error
+        255: Unexpected error
+
+    Logging:
+        - All operations are logged to the salt minion log
+        - Log entries are prefixed with 'qcow2 module:'
+        - Volume creation and attachment operations are logged
+        - Errors include detailed messages and stack traces
+        - Final status of volume creation is logged
+    '''
+
+    # Validate size_gb parameter
+    if not isinstance(size_gb, int) or size_gb <= 0:
+        raise ValueError('size_gb must be a positive integer.')
+
+    cmd = ['/usr/sbin/so-kvm-create-volume', '-v', vm_name, '-s', str(size_gb)]
+
+    if start:
+        cmd.append('-S')
+
+    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
+
+    try:
+        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
+        ret = {
+            'retcode': result.returncode,
+            'stdout': result.stdout,
+            'stderr': result.stderr
+        }
+        if result.returncode != 0:
+            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
+        else:
+            log.info('qcow2 module: Script executed successfully.')
+        return ret
+    except Exception as e:
+        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
+        raise
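All three functions shell out to a helper script and return its retcode/stdout/stderr as a dict, so callers branch on `retcode` rather than catching exceptions. The PCI handling in `modify_hardware_config` accepts a single ID, a comma-separated string, or a list. A small standalone sketch of that normalization and command construction (VM name and device IDs invented for illustration):

```python
import shlex

def pci_args(pci):
    # Normalize str / comma-separated str / list into repeated -p flags,
    # mirroring modify_hardware_config before it invokes so-kvm-modify-hardware
    if isinstance(pci, str):
        devices = [dev.strip() for dev in pci.split(',') if dev.strip()]
    elif isinstance(pci, list):
        devices = pci
    else:
        devices = [pci]
    args = []
    for device in devices:
        args.extend(['-p', str(device)])
    return args

cmd = ['/usr/sbin/so-kvm-modify-hardware', '-v', 'sensor1'] + pci_args('0000:c7:00.0,0000:c4:00.0')
print(' '.join(shlex.quote(a) for a in cmd))
# /usr/sbin/so-kvm-modify-hardware -v sensor1 -p 0000:c7:00.0 -p 0000:c4:00.0
```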
salt/_runners/setup_hypervisor.py (new file, 1171 lines)

File diff suppressed because it is too large.
@@ -1,264 +1,178 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
+{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+   https://securityonion.net/license; you may not use this file except in compliance with the
+   Elastic License 2.0. #}

 {% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
 {% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
 {% set saltversion = saltversion.salt.minion.version %}

-{# this is the list we are returning from this map file, it gets built below #}
-{% set allowed_states= [] %}
+{# Define common state groups to reduce redundancy #}
+{% set base_states = [
+  'common',
+  'patch.os.schedule',
+  'motd',
+  'salt.minion-check',
+  'sensoroni',
+  'salt.lasthighstate',
+  'salt.minion'
+] %}
+
+{% set ssl_states = [
+  'ssl',
+  'telegraf',
+  'firewall',
+  'schedule',
+  'docker_clean'
+] %}
+
+{% set manager_states = [
+  'salt.master',
+  'ca',
+  'registry',
+  'manager',
+  'nginx',
+  'influxdb',
+  'soc',
+  'kratos',
+  'hydra',
+  'elasticfleet',
+  'elastic-fleet-package-registry',
+  'utility'
+] %}
+
+{% set sensor_states = [
+  'pcap',
+  'suricata',
+  'healthcheck',
+  'tcpreplay',
+  'zeek',
+  'strelka'
+] %}
+
+{% set kafka_states = [
+  'kafka'
+] %}
+
+{% set stig_states = [
+  'stig'
+] %}
+
+{% set elastic_stack_states = [
+  'elasticsearch',
+  'elasticsearch.auth',
+  'kibana',
+  'kibana.secrets',
+  'elastalert',
+  'logstash',
+  'redis'
+] %}
+
+{# Initialize the allowed_states list #}
+{% set allowed_states = [] %}

 {% if grains.saltversion | string == saltversion | string %}
+  {# Map role-specific states #}
+  {% set role_states = {
+    'so-eval': (
+      ssl_states +
+      manager_states +
+      sensor_states +
+      elastic_stack_states | reject('equalto', 'logstash') | list
+    ),
+    'so-heavynode': (
+      ssl_states +
+      sensor_states +
+      ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
+    ),
+    'so-idh': (
+      ssl_states +
+      ['idh']
+    ),
+    'so-import': (
+      ssl_states +
+      manager_states +
+      sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
+      ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
+    ),
+    'so-manager': (
+      ssl_states +
+      manager_states +
+      ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
+      stig_states +
+      kafka_states +
+      elastic_stack_states
+    ),
+    'so-managerhype': (
+      ssl_states +
+      manager_states +
+      ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
+      stig_states +
+      kafka_states +
+      elastic_stack_states
+    ),
+    'so-managersearch': (
+      ssl_states +
+      manager_states +
+      ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
+      stig_states +
+      kafka_states +
+      elastic_stack_states
+    ),
+    'so-searchnode': (
+      ssl_states +
+      ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
+      stig_states
+    ),
+    'so-standalone': (
+      ssl_states +
+      manager_states +
+      ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
+      sensor_states +
+      stig_states +
+      kafka_states +
+      elastic_stack_states
+    ),
+    'so-sensor': (
+      ssl_states +
+      sensor_states +
+      ['nginx'] +
+      stig_states
+    ),
+    'so-fleet': (
+      ssl_states +
+      stig_states +
+      ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
+    ),
+    'so-receiver': (
+      ssl_states +
+      kafka_states +
+      stig_states +
+      ['logstash', 'redis']
+    ),
+    'so-hypervisor': (
+      ssl_states +
+      stig_states +
+      ['hypervisor', 'libvirt']
+    ),
+    'so-desktop': (
+      ['ssl', 'docker_clean', 'telegraf'] +
+      stig_states
+    )
+  } %}

-  {% set allowed_states= salt['grains.filter_by']({
-    'so-eval': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'manager',
-      'nginx',
-      'telegraf',
-      'influxdb',
-      'soc',
-      'kratos',
-      'hydra',
-      'elasticfleet',
-      'elastic-fleet-package-registry',
-      'firewall',
-      'idstools',
-      'suricata.manager',
-      'healthcheck',
-      'pcap',
-      'suricata',
-      'utility',
-      'schedule',
-      'tcpreplay',
-      'docker_clean'
-    ],
-    'so-heavynode': [
-      'ssl',
-      'nginx',
-      'telegraf',
-      'firewall',
-      'pcap',
-      'suricata',
-      'healthcheck',
-      'elasticagent',
-      'schedule',
-      'tcpreplay',
-      'docker_clean'
-    ],
-    'so-idh': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'idh',
-      'schedule',
-      'docker_clean'
-    ],
-    'so-import': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'manager',
-      'nginx',
-      'strelka.manager',
-      'soc',
-      'kratos',
-      'hydra',
-      'influxdb',
-      'telegraf',
-      'firewall',
-      'idstools',
-      'suricata.manager',
-      'pcap',
-      'utility',
-      'suricata',
-      'zeek',
-      'schedule',
-      'tcpreplay',
-      'docker_clean',
-      'elasticfleet',
-      'elastic-fleet-package-registry'
-    ],
-    'so-manager': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'manager',
-      'nginx',
-      'telegraf',
-      'influxdb',
-      'strelka.manager',
-      'soc',
-      'kratos',
-      'hydra',
-      'elasticfleet',
-      'elastic-fleet-package-registry',
-      'firewall',
-      'idstools',
-      'suricata.manager',
-      'utility',
-      'schedule',
-      'docker_clean',
-      'stig',
-      'kafka'
-    ],
-    'so-managersearch': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'nginx',
-      'telegraf',
-      'influxdb',
-      'strelka.manager',
-      'soc',
-      'kratos',
-      'hydra',
-      'elastic-fleet-package-registry',
-      'elasticfleet',
-      'firewall',
-      'manager',
-      'idstools',
-      'suricata.manager',
-      'utility',
-      'schedule',
-      'docker_clean',
-      'stig',
-      'kafka'
-    ],
-    'so-searchnode': [
-      'ssl',
-      'nginx',
-      'telegraf',
-      'firewall',
-      'schedule',
-      'docker_clean',
-      'stig',
-      'kafka.ca',
-      'kafka.ssl'
-    ],
-    'so-standalone': [
-      'salt.master',
-      'ca',
-      'ssl',
-      'registry',
-      'manager',
-      'nginx',
-      'telegraf',
-      'influxdb',
-      'soc',
-      'kratos',
-      'hydra',
-      'elastic-fleet-package-registry',
-      'elasticfleet',
-      'firewall',
-      'idstools',
-      'suricata.manager',
-      'pcap',
-      'suricata',
-      'healthcheck',
-      'utility',
-      'schedule',
-      'tcpreplay',
-      'docker_clean',
-      'stig',
-      'kafka'
-    ],
-    'so-sensor': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'nginx',
-      'pcap',
-      'suricata',
-      'healthcheck',
-      'schedule',
-      'tcpreplay',
-      'docker_clean',
-      'stig'
-    ],
-    'so-fleet': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'logstash',
-      'nginx',
-      'healthcheck',
-      'schedule',
-      'elasticfleet',
-      'docker_clean'
-    ],
-    'so-receiver': [
-      'ssl',
-      'telegraf',
-      'firewall',
-      'schedule',
-      'docker_clean',
-      'kafka',
-      'stig'
-    ],
-    'so-desktop': [
-      'ssl',
-      'docker_clean',
-      'telegraf',
-      'stig'
-    ],
-  }, grain='role') %}
-
-  {%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
-    {% do allowed_states.append('zeek') %}
-  {%- endif %}
-
-  {% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
-    {% do allowed_states.append('strelka') %}
-  {% endif %}
-
-  {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
-    {% do allowed_states.append('elasticsearch') %}
-  {% endif %}
-
-  {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
-    {% do allowed_states.append('elasticsearch.auth') %}
-  {% endif %}
-
-  {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
-    {% do allowed_states.append('kibana') %}
-    {% do allowed_states.append('kibana.secrets') %}
-  {% endif %}
-
-  {% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
-    {% do allowed_states.append('elastalert') %}
-  {% endif %}
-
-  {% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
-    {% do allowed_states.append('logstash') %}
-  {% endif %}
-
-  {% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
-    {% do allowed_states.append('redis') %}
-  {% endif %}
-
-  {# all nodes on the right salt version can run the following states #}
-  {% do allowed_states.append('common') %}
-  {% do allowed_states.append('patch.os.schedule') %}
-  {% do allowed_states.append('motd') %}
-  {% do allowed_states.append('salt.minion-check') %}
-  {% do allowed_states.append('sensoroni') %}
-  {% do allowed_states.append('salt.lasthighstate') %}
+  {# Get states for the current role #}
+  {% if grains.role in role_states %}
+    {% set allowed_states = role_states[grains.role] %}
+  {% endif %}
+
+  {# Add base states that apply to all roles #}
+  {% for state in base_states %}
+    {% do allowed_states.append(state) %}
+  {% endfor %}
 {% endif %}


+{# Add airgap state if needed #}
 {% if ISAIRGAP %}
-  {% do allowed_states.append('airgap') %}
+  {% do allowed_states.append('airgap') %}
 {% endif %}

-{# all nodes can always run salt.minion state #}
-{% do allowed_states.append('salt.minion') %}
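The refactor replaces one large `grains.filter_by` mapping with reusable state groups composed per role, then a plain dict lookup. A hedged Python analogue of that lookup-and-compose pattern (groups abbreviated from the diff; not part of the repo):

```python
# State groups (abbreviated from the Jinja above)
ssl_states = ['ssl', 'telegraf', 'firewall', 'schedule', 'docker_clean']
stig_states = ['stig']
base_states = ['common', 'patch.os.schedule', 'motd', 'salt.minion-check',
               'sensoroni', 'salt.lasthighstate', 'salt.minion']

# Per-role composition, mirroring role_states in the diff
role_states = {
    'so-receiver': ssl_states + ['kafka'] + stig_states + ['logstash', 'redis'],
    'so-idh': ssl_states + ['idh'],
}

def allowed_states_for(role):
    # Same shape as the Jinja: role-specific states first, then base states for everyone
    return role_states.get(role, []) + base_states

print(allowed_states_for('so-idh'))
# ['ssl', 'telegraf', 'firewall', 'schedule', 'docker_clean', 'idh',
#  'common', 'patch.os.schedule', 'motd', 'salt.minion-check',
#  'sensoroni', 'salt.lasthighstate', 'salt.minion']
```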
@@ -11,6 +11,10 @@ TODAY=$(date '+%Y_%m_%d')
 BACKUPDIR={{ DESTINATION }}
 BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar"
 MAXBACKUPS=7
+EXCLUSIONS=(
+  "--exclude=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers"
+)

 # Create backup dir if it does not exist
 mkdir -p /nsm/backup
@@ -23,7 +27,7 @@ if [ ! -f $BACKUPFILE ]; then

   # Loop through all paths defined in global.sls, and append them to backup file
 {%- for LOCATION in BACKUPLOCATIONS %}
-  tar -rf $BACKUPFILE {{ LOCATION }}
+  tar -rf $BACKUPFILE "${EXCLUSIONS[@]}" {{ LOCATION }}
 {%- endfor %}

 fi
@@ -1,4 +1,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% set PCAP_BPF_STATUS = 0 %}
+{% set STENO_BPF_COMPILED = "" %}
+
 {% if GLOBALS.pcap_engine == "TRANSITION" %}
   {% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
 {% else %}
@@ -8,3 +11,11 @@
   {{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
   {% set PCAPBPF = BPFMERGED.pcap %}
 {% endif %}
+
+{% if PCAPBPF %}
+  {% set PCAP_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ PCAPBPF|join(" "), cwd='/root') %}
+  {% if PCAP_BPF_CALC['retcode'] == 0 %}
+    {% set PCAP_BPF_STATUS = 1 %}
+    {% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
+  {% endif %}
+{% endif %}
@@ -1,11 +1,11 @@
|
||||
bpf:
|
||||
pcap:
|
||||
description: List of BPF filters to apply to Stenographer.
|
||||
description: List of BPF filters to apply to the PCAP engine.
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: bpf.html
|
||||
suricata:
|
||||
description: List of BPF filters to apply to Suricata.
|
||||
description: List of BPF filters to apply to Suricata. This will apply to alerts and, if enabled, to metadata and PCAP logs generated by Suricata.
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: bpf.html

@@ -1,7 +1,16 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% set SURICATA_BPF_STATUS = 0 %}
{% import 'bpf/macros.jinja' as MACROS %}

{{ MACROS.remove_comments(BPFMERGED, 'suricata') }}

{% set SURICATABPF = BPFMERGED.suricata %}

{% if SURICATABPF %}
{% set SURICATA_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ SURICATABPF|join(" "), cwd='/root') %}
{% if SURICATA_BPF_CALC['retcode'] == 0 %}
{% set SURICATA_BPF_STATUS = 1 %}
{% endif %}
{% endif %}

@@ -1,7 +1,16 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% set ZEEK_BPF_STATUS = 0 %}
{% import 'bpf/macros.jinja' as MACROS %}

{{ MACROS.remove_comments(BPFMERGED, 'zeek') }}

{% set ZEEKBPF = BPFMERGED.zeek %}

{% if ZEEKBPF %}
{% set ZEEK_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ ZEEKBPF|join(" "), cwd='/root') %}
{% if ZEEK_BPF_CALC['retcode'] == 0 %}
{% set ZEEK_BPF_STATUS = 1 %}
{% endif %}
{% endif %}

21
salt/common/grains.sls
Normal file
@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% set nsm_exists = salt['file.directory_exists']('/nsm') %}
{% if nsm_exists %}
{% set nsm_total = salt['cmd.shell']('df -BG /nsm | tail -1 | awk \'{print $2}\'') %}

nsm_total:
  grains.present:
    - name: nsm_total
    - value: {{ nsm_total }}

{% else %}

nsm_missing:
  test.succeed_without_changes:
    - name: /nsm does not exist, skipping grain assignment

{% endif %}
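
The grain simply stores the size column from df, unit suffix included; a quick sketch of the pipeline (the mount size shown is illustrative):

# Sketch: the string that ends up in the nsm_total grain.
df -BG /nsm | tail -1 | awk '{print $2}'
# -> 500G   (illustrative; note the value is a string with its unit)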

@@ -4,6 +4,7 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

include:
  - common.grains
  - common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %}
  - manager.elasticsearch # needed for elastic_curl_config state
@@ -106,7 +107,7 @@ Etc/UTC:
    timezone.system

# Sync curl configuration for Elasticsearch authentication
{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-searchnode'] %}
elastic_curl_config:
  file.managed:
    - name: /opt/so/conf/elasticsearch/curl.config

@@ -1,6 +1,6 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}

{% if GLOBALS.os_family == 'Debian' %}
# we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init
# since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined
{% if grains.os_family == 'Debian' %}
commonpkgs:
  pkg.installed:
    - skip_suggestions: True
@@ -46,7 +46,7 @@ python-rich:
{% endif %}
{% endif %}

{% if GLOBALS.os_family == 'RedHat' %}
{% if grains.os_family == 'RedHat' %}

remove_mariadb:
  pkg.removed:

@@ -29,9 +29,26 @@ fi

interface="$1"
shift
tcpdump -i $interface -ddd $@ | tail -n+2 |
while read line; do

# Capture tcpdump output and exit code
tcpdump_output=$(tcpdump -i "$interface" -ddd "$@" 2>&1)
tcpdump_exit=$?

if [ $tcpdump_exit -ne 0 ]; then
  echo "$tcpdump_output" >&2
  exit $tcpdump_exit
fi

# Process the output, skipping the first line
echo "$tcpdump_output" | tail -n+2 | while read -r line; do
  cols=( $line )
  printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}
  printf "%04x%02x%02x%08x" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}"
done

# Check if the pipeline succeeded
if [ "${PIPESTATUS[0]}" -ne 0 ]; then
  exit 1
fi

echo ""
exit 0
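
For reference, `tcpdump -ddd` prints an instruction count followed by one classic-BPF instruction per line as four decimal columns (opcode, jt, jf, k), and the printf packs each instruction into 2+1+1+4 bytes of hex. A minimal sketch with an illustrative instruction:

# Sketch: pack one BPF instruction the way the loop above does.
# "40 0 0 12" is `ldh [12]` (opcode 40 = 0x28, k = 12), a typical first instruction.
cols=( 40 0 0 12 )
printf "%04x%02x%02x%08x\n" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}"
# -> 002800000000000c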

@@ -220,12 +220,22 @@ compare_es_versions() {
}

copy_new_files() {
  # Define files to exclude from deletion (relative to their respective base directories)
  local EXCLUDE_FILES=(
    "salt/hypervisor/soc_hypervisor.yaml"
  )

  # Build rsync exclude arguments
  local EXCLUDE_ARGS=()
  for file in "${EXCLUDE_FILES[@]}"; do
    EXCLUDE_ARGS+=(--exclude="$file")
  done

  # Copy new files over to the salt dir
  cd $UPDATE_DIR
  rsync -a salt $DEFAULT_SALT_DIR/ --delete
  rsync -a pillar $DEFAULT_SALT_DIR/ --delete
  rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
  rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
  chown -R socore:socore $DEFAULT_SALT_DIR/
  chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
  cd /tmp
}
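
One rsync detail makes this work: patterns named by --exclude are not only skipped on the sending side, they are also protected from --delete on the receiving side (unless --delete-excluded is given), so the local soc_hypervisor.yaml survives the update. A small sketch with illustrative directory names:

# Sketch: --exclude shields the receiver-side file from --delete.
mkdir -p src/salt dst/salt
echo new  > src/salt/common.sls
echo keep > dst/salt/soc_hypervisor.yaml
rsync -a --delete --exclude='soc_hypervisor.yaml' src/salt/ dst/salt/
ls dst/salt/   # both common.sls and soc_hypervisor.yaml remain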

@@ -385,7 +395,7 @@ is_manager_node() {
}

is_sensor_node() {
  # Check to see if this is a sensor (forward) node
  # Check to see if this is a sensor node
  is_single_node_grid && return 0
  grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
}

@@ -441,8 +451,7 @@ lookup_grain() {

lookup_role() {
  id=$(lookup_grain id)
  pieces=($(echo $id | tr '_' ' '))
  echo ${pieces[1]}
  echo "${id##*_}"
}
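
The suffix expansion is the safer parse: the old array split breaks as soon as the hostname itself contains an underscore, while `${id##*_}` always returns the text after the last underscore. An illustrative comparison:

# Sketch: id with an underscore in the hostname (value illustrative).
id="sensor_one_sensor"
pieces=($(echo $id | tr '_' ' '))
echo "${pieces[1]}"   # -> one     (wrong: a hostname fragment)
echo "${id##*_}"      # -> sensor  (correct: the role suffix)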

is_feature_enabled() {

@@ -25,7 +25,6 @@ container_list() {
  if [ $MANAGERCHECK == 'so-import' ]; then
    TRUSTED_CONTAINERS=(
      "so-elasticsearch"
      "so-idstools"
      "so-influxdb"
      "so-kibana"
      "so-kratos"
@@ -49,7 +48,6 @@ container_list() {
      "so-elastic-fleet-package-registry"
      "so-elasticsearch"
      "so-idh"
      "so-idstools"
      "so-influxdb"
      "so-kafka"
      "so-kibana"
@@ -62,8 +60,6 @@ container_list() {
      "so-soc"
      "so-steno"
      "so-strelka-backend"
      "so-strelka-filestream"
      "so-strelka-frontend"
      "so-strelka-manager"
      "so-suricata"
      "so-telegraf"
@@ -71,7 +67,6 @@ container_list() {
    )
  else
    TRUSTED_CONTAINERS=(
      "so-idstools"
      "so-elasticsearch"
      "so-logstash"
      "so-nginx"

@@ -158,6 +158,8 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index lifecycle policy" # false positive (elasticsearch policy names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
fi

if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -220,6 +222,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
fi

RESULT=0
@@ -266,6 +269,13 @@ for log_file in $(cat /tmp/log_check_files); do
tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
check_for_errors
done
# Look for OOM specific errors in /var/log/messages which can lead to odd behavior / test failures
if [[ -f /var/log/messages ]]; then
  status "Checking log file /var/log/messages"
  if journalctl --since "24 hours ago" | grep -iE 'out of memory|oom-kill'; then
    RESULT=1
  fi
fi

# Cleanup temp files
rm -f /tmp/log_check_files

53
salt/common/tools/sbin/so_logging_utils.py
Normal file
@@ -0,0 +1,53 @@
#!/usr/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

import logging
import os
import sys

def setup_logging(logger_name, log_file_path, log_level=logging.INFO, format_str='%(asctime)s - %(levelname)s - %(message)s'):
    """
    Sets up logging for a script.

    Parameters:
        logger_name (str): The name of the logger.
        log_file_path (str): The file path for the log file.
        log_level (int): The logging level (e.g., logging.INFO, logging.DEBUG).
        format_str (str): The format string for log messages.

    Returns:
        logging.Logger: Configured logger object.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(log_level)

    # Create directory for log file if it doesn't exist
    log_file_dir = os.path.dirname(log_file_path)
    if log_file_dir and not os.path.exists(log_file_dir):
        try:
            os.makedirs(log_file_dir)
        except OSError as e:
            print(f"Error creating directory {log_file_dir}: {e}")
            sys.exit(1)

    # Create handlers
    c_handler = logging.StreamHandler()
    f_handler = logging.FileHandler(log_file_path)
    c_handler.setLevel(log_level)
    f_handler.setLevel(log_level)

    # Create formatter and add it to handlers
    formatter = logging.Formatter(format_str)
    c_handler.setFormatter(formatter)
    f_handler.setFormatter(formatter)

    # Add handlers to the logger if they are not already added
    if not logger.hasHandlers():
        logger.addHandler(c_handler)
        logger.addHandler(f_handler)

    return logger

@@ -173,7 +173,7 @@ for PCAP in $INPUT_FILES; do
status "- assigning unique identifier to import: $HASH"

pcap_data=$(pcapinfo "${PCAP}")
if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then
if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then
  status "- this PCAP file is invalid; skipping"
  INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
else
@@ -205,8 +205,8 @@ for PCAP in $INPUT_FILES; do
HASHES="${HASHES} ${HASH}"
fi

START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}')
END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}')
START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}')
END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}')
status "- found PCAP data spanning dates $START through $END"

# compare $START to $START_OLDEST
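
The grep changes track newer Wireshark releases, which renamed "First/Last packet time" to "Earliest/Latest packet time" in capinfos output; pcapinfo here is assumed to wrap capinfos. A sketch of the parsing, with an illustrative capture file and timestamps:

# Sketch: parse the renamed capinfos fields (file and times illustrative).
capinfos -a -e example.pcap
#   Earliest packet time: 2024-01-02 03:04:05
#   Latest packet time:   2024-01-02 03:09:59
capinfos -a example.pcap | grep "Earliest packet time:" | awk '{print $4}'
# -> 2024-01-02   (field 4 is the date portion only)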

@@ -248,7 +248,7 @@ fi
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.as.organization.name%20source.geo.country_name%20%7C%20groupby%20destination.as.organization.name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"

status "Import complete!"
status

@@ -0,0 +1,132 @@
#!/opt/saltstack/salt/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{% if 'vrt' in salt['pillar.get']('features', []) -%}

"""
Script for emitting VM deployment status events to the Salt event bus.

This script provides functionality to emit status events for VM deployment operations,
used by various Security Onion VM management tools.

Usage:
    so-salt-emit-vm-deployment-status-event -v <vm_name> -H <hypervisor> -s <status>

Arguments:
    -v, --vm-name      Name of the VM (hostname_role)
    -H, --hypervisor   Name of the hypervisor
    -s, --status       Current deployment status of the VM

Example:
    so-salt-emit-vm-deployment-status-event -v sensor1_sensor -H hypervisor1 -s "Creating"
"""

import sys
import argparse
import logging
import salt.client
from typing import Dict, Any

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
log = logging.getLogger(__name__)

def emit_event(vm_name: str, hypervisor: str, status: str) -> bool:
    """
    Emit a VM deployment status event to the salt event bus.

    Args:
        vm_name: Name of the VM (hostname_role)
        hypervisor: Name of the hypervisor
        status: Current deployment status of the VM

    Returns:
        bool: True if event was sent successfully, False otherwise

    Raises:
        ValueError: If status is not a valid deployment status
    """
    log.info("Attempting to emit deployment event...")

    try:
        caller = salt.client.Caller()
        event_data = {
            'vm_name': vm_name,
            'hypervisor': hypervisor,
            'status': status
        }

        # Use consistent event tag structure
        event_tag = f'soc/dyanno/hypervisor/{status.lower()}'

        ret = caller.cmd(
            'event.send',
            event_tag,
            event_data
        )

        if not ret:
            log.error("Failed to emit VM deployment status event: %s", event_data)
            return False

        log.info("Successfully emitted VM deployment status event: %s", event_data)
        return True

    except Exception as e:
        log.error("Error emitting VM deployment status event: %s", str(e))
        return False

def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description='Emit VM deployment status events to the Salt event bus.'
    )
    parser.add_argument('-v', '--vm-name', required=True,
                        help='Name of the VM (hostname_role)')
    parser.add_argument('-H', '--hypervisor', required=True,
                        help='Name of the hypervisor')
    parser.add_argument('-s', '--status', required=True,
                        help='Current deployment status of the VM')
    return parser.parse_args()

def main():
    """Main entry point for the script."""
    try:
        args = parse_args()

        success = emit_event(
            vm_name=args.vm_name,
            hypervisor=args.hypervisor,
            status=args.status
        )

        if not success:
            sys.exit(1)

    except Exception as e:
        log.error("Failed to emit status event: %s", str(e))
        sys.exit(1)

if __name__ == '__main__':
    main()

{%- else -%}

echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
for more information about purchasing a license to enable this feature."

{% endif -%}

@@ -24,11 +24,6 @@ docker:
      custom_bind_mounts: []
      extra_hosts: []
      extra_env: []
    'so-idstools':
      final_octet: 25
      custom_bind_mounts: []
      extra_hosts: []
      extra_env: []
    'so-influxdb':
      final_octet: 26
      port_bindings:

@@ -41,7 +41,6 @@ docker:
        forcedType: "[]string"
      so-elastic-fleet: *dockerOptions
      so-elasticsearch: *dockerOptions
      so-idstools: *dockerOptions
      so-influxdb: *dockerOptions
      so-kibana: *dockerOptions
      so-kratos: *dockerOptions
@@ -102,4 +101,4 @@ docker:
        multiline: True
        forcedType: "[]string"
      so-zeek: *dockerOptions
      so-kafka: *dockerOptions
      so-kafka: *dockerOptions

@@ -9,3 +9,6 @@ fleetartifactdir:
    - user: 947
    - group: 939
    - makedirs: True
    - recurse:
      - user
      - group

@@ -9,6 +9,9 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% set node_data = salt['pillar.get']('node_data') %}

include:
  - elasticfleet.artifact_registry

# Add EA Group
elasticfleetgroup:
  group.present:
@@ -166,7 +169,7 @@ eaoptionalintegrationsdir:

{% for minion in node_data %}
{% set role = node_data[minion]["role"] %}
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
{% if role in [ "eval","fleet","heavynode","import","manager", "managerhype", "managersearch","standalone" ] %}
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
{% set integration_keys = optional_integrations.keys() %}
fleet_server_integrations_{{ minion }}:

@@ -15,7 +15,6 @@ elasticfleet:
  logging:
    zeek:
      excluded:
        - analyzer
        - broker
        - capture_loss
        - cluster
@@ -38,6 +37,7 @@ elasticfleet:
        - elasticsearch
        - endpoint
        - fleet_server
        - filestream
        - http_endpoint
        - httpjson
        - log

@@ -32,6 +32,16 @@ so-elastic-fleet-auto-configure-logstash-outputs:
    - retry:
        attempts: 4
        interval: 30

{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
so-elastic-fleet-auto-configure-logstash-outputs-force:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs
    - retry:
        attempts: 4
        interval: 30
    - onchanges:
      - x509: etc_elasticfleet_logstash_crt
{% endif %}
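
As the comment above notes, the force variant re-pushes the Fleet output policy only when the logstash certificate changes; a sketch of the equivalent manual invocation, using the flags straight from the state:

# Sketch: what the onchanges state runs after elasticfleet-logstash.crt is reissued.
/usr/sbin/so-elastic-fleet-outputs-update --force --certs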

# If enabled, automatically update Fleet Server URLs & ES Connection
@@ -67,6 +77,8 @@ so-elastic-fleet-auto-configure-artifact-urls:
elasticagent_syncartifacts:
  file.recurse:
    - name: /nsm/elastic-fleet/artifacts/beats
    - user: 947
    - group: 947
    - source: salt://beats
{% endif %}

@@ -133,12 +145,18 @@ so-elastic-fleet-package-statefile:
so-elastic-fleet-package-upgrade:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-package-upgrade
    - retry:
        attempts: 3
        interval: 10
    - onchanges:
      - file: /opt/so/state/elastic_fleet_packages.txt

so-elastic-fleet-integrations:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-integration-policy-load
    - retry:
        attempts: 3
        interval: 10

so-elastic-agent-grid-upgrade:
  cmd.run:
@@ -150,7 +168,11 @@ so-elastic-agent-grid-upgrade:
so-elastic-fleet-integration-upgrade:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-integration-upgrade
    - retry:
        attempts: 3
        interval: 10

{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-optional-integrations-load

@@ -1,32 +1,33 @@
{
  "name": "elastic-defend-endpoints",
  "namespace": "default",
  "description": "",
  "package": {
    "name": "endpoint",
    "title": "Elastic Defend",
    "version": "8.17.0",
    "requires_root": true
  },
  "enabled": true,
  "policy_id": "endpoints-initial",
  "vars": {},
  "inputs": [
    {
      "type": "endpoint",
      "enabled": true,
      "config": {
        "integration_config": {
          "value": {
            "type": "endpoint",
            "endpointConfig": {
              "preset": "DataCollection"
            }
          }
        }
      },
      "streams": []
    }
  ]
}

  "name": "elastic-defend-endpoints",
  "namespace": "default",
  "description": "",
  "package": {
    "name": "endpoint",
    "title": "Elastic Defend",
    "version": "8.18.1",
    "requires_root": true
  },
  "enabled": true,
  "policy_ids": [
    "endpoints-initial"
  ],
  "vars": {},
  "inputs": [
    {
      "type": "ENDPOINT_INTEGRATION_CONFIG",
      "enabled": true,
      "config": {
        "_config": {
          "value": {
            "type": "endpoint",
            "endpointConfig": {
              "preset": "DataCollection"
            }
          }
        }
      },
      "streams": []
    }
  ]
}

@@ -0,0 +1,48 @@
{
  "package": {
    "name": "filestream",
    "version": ""
  },
  "name": "agent-monitor",
  "namespace": "",
  "description": "",
  "policy_ids": [
    "so-grid-nodes_general"
  ],
  "output_id": null,
  "vars": {},
  "inputs": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/agents/agent-monitor.log"
            ],
            "data_stream.dataset": "agentmonitor",
            "pipeline": "elasticagent.monitor",
            "parsers": "",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: gridmetrics",
            "tags": [],
            "recursive_glob": true,
            "ignore_older": "72h",
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": true,
            "fingerprint_offset": 0,
            "fingerprint_length": 64,
            "file_identity_native": false,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  }
}

@@ -40,7 +40,7 @@
"enabled": true,
"vars": {
  "paths": [
    "/opt/so/log/elasticsearch/*.log"
    "/opt/so/log/elasticsearch/*.json"
  ]
}
},

@@ -19,7 +19,7 @@
],
"data_stream.dataset": "idh",
"tags": [],
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
"custom": "pipeline: common"
}
}

@@ -20,7 +20,7 @@
],
"data_stream.dataset": "import",
"custom": "",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.67.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-2.5.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.67.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.67.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-2.5.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
"tags": [
  "import"
]

@@ -11,7 +11,7 @@
"tcp-tcp": {
  "enabled": true,
  "streams": {
    "tcp.generic": {
    "tcp.tcp": {
      "enabled": true,
      "vars": {
        "listen_address": "0.0.0.0",
@@ -23,7 +23,8 @@
        "syslog"
      ],
      "syslog_options": "field: message\n#format: auto\n#timezone: Local",
      "ssl": ""
      "ssl": "",
      "custom": ""
    }
  }
}

@@ -2,26 +2,30 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

{%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
{%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
{% if grains.role == 'so-heavynode' %}
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% endif %}

{% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% if not AGENT_STATUS %}

{% if grains.role not in ['so-heavynode'] %}
run_installer:
  cmd.script:
    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - cwd: /opt/so
    - args: -token={{ GRIDNODETOKENGENERAL }}
    - retry: True
{% else %}
run_installer:
  cmd.script:
    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - cwd: /opt/so
    - args: -token={{ GRIDNODETOKENHEAVY }}
    - retry: True
{% endif %}
pull_agent_installer:
  file.managed:
    - name: /opt/so/so-elastic-agent_linux_amd64
    - source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - mode: 755
    - makedirs: True

run_installer:
  cmd.run:
    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }}
    - cwd: /opt/so
    - retry:
        attempts: 3
        interval: 20

cleanup_agent_installer:
  file.absent:
    - name: /opt/so/so-elastic-agent_linux_amd64
{% endif %}
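
In shell terms, the refactored flow amounts to three steps instead of a single cmd.script call; a hedged sketch (the token value is illustrative, and the retry is handled by Salt):

# Sketch: the managed-file + run + cleanup sequence the states describe.
TOKEN="<enrollment-token>"   # illustrative; comes from the pillar lookup above
# file.managed: fetch the installer from the salt fileserver to /opt/so, mode 755
cd /opt/so && ./so-elastic-agent_linux_amd64 -token="$TOKEN"   # cmd.run, retried 3x / 20s
rm -f /opt/so/so-elastic-agent_linux_amd64                     # file.absent cleanup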

@@ -4,6 +4,7 @@

{% import_json '/opt/so/state/esfleet_package_components.json' as ADDON_PACKAGE_COMPONENTS %}
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}

{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
@@ -14,6 +15,7 @@
'awsfirehose.logs': 'awsfirehose',
'awsfirehose.metrics': 'aws.cloudwatch',
'cribl.logs': 'cribl',
'cribl.metrics': 'cribl',
'sentinel_one_cloud_funnel.logins': 'sentinel_one_cloud_funnel.login',
'azure_application_insights.app_insights': 'azure.app_insights',
'azure_application_insights.app_state': 'azure.app_state',
@@ -45,7 +47,10 @@
'synthetics.browser_screenshot': 'synthetics-browser.screenshot',
'synthetics.http': 'synthetics-http',
'synthetics.icmp': 'synthetics-icmp',
'synthetics.tcp': 'synthetics-tcp'
'synthetics.tcp': 'synthetics-tcp',
'swimlane.swimlane_api': 'swimlane.api',
'swimlane.tenant_api': 'swimlane.tenant',
'swimlane.turbine_api': 'turbine.api'
} %}

{% for pkg in ADDON_PACKAGE_COMPONENTS %}
@@ -62,70 +67,90 @@
{% else %}
{% set integration_type = "" %}
{% endif %}
{% set component_name = pkg.name ~ "." ~ pattern.title %}
{# fix weirdly named components #}
{% if component_name in WEIRD_INTEGRATIONS %}
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
{% endif %}
{% set component_name = pkg.name ~ "." ~ pattern.title %}
{% set index_pattern = pattern.name %}

{# fix weirdly named components #}
{% if component_name in WEIRD_INTEGRATIONS %}
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
{% endif %}

{# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed
eg. having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #}
{% set custom_component_name = component_name %}

{# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #}
{% set generic_integration_type = integration_type %}

{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ component_name_x %}

{# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #}
{% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %}
{# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #}
{% set index_pattern = integration_type ~ component_name ~ "-*" %}
{# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #}
{% set component_name = "filestream.generic" %}
{% set generic_integration_type = "logs-" %}
{% endif %}

{# Default integration settings #}
{% set integration_defaults = {
  "index_sorting": false,
  "index_template": {
    "composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
    "data_stream": {
      "allow_custom_routing": false,
      "hidden": false
    },
    "ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
    "index_patterns": [pattern.name],
    "priority": 501,
    "template": {
      "settings": {
        "index": {
          "lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
          "number_of_replicas": 0
        }
      }
    }
  },
  "policy": {
    "phases": {
      "cold": {
        "actions": {
          "set_priority": {"priority": 0}
        },
        "min_age": "60d"
  "index_sorting": false,
  "index_template": {
    "composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
    "data_stream": {
      "allow_custom_routing": false,
      "hidden": false
    },
    "ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"],
    "index_patterns": [index_pattern],
    "priority": 501,
    "template": {
      "settings": {
        "index": {
          "lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"},
          "number_of_replicas": 0
        }
      }
    }
  },
  "policy": {
    "phases": {
      "cold": {
        "actions": {
          "set_priority": {"priority": 0}
        },
        "min_age": "60d"
      },
      "delete": {
        "actions": {
          "delete": {}
        },
        "min_age": "365d"
      },
      "hot": {
        "actions": {
          "rollover": {
            "max_age": "30d",
            "max_primary_shard_size": "50gb"
          },
          "set_priority": {"priority": 100}
        },
      "delete": {
        "actions": {
          "delete": {}
        },
        "min_age": "365d"
      },
      "hot": {
        "actions": {
          "rollover": {
            "max_age": "30d",
            "max_primary_shard_size": "50gb"
          },
          "set_priority": {"priority": 100}
        },
        "min_age": "0ms"
      },
      "warm": {
        "actions": {
          "set_priority": {"priority": 50}
        },
        "min_age": "30d"
      }
    }
  }
} %}
        "min_age": "0ms"
      },
      "warm": {
        "actions": {
          "set_priority": {"priority": 50}
        },
        "min_age": "30d"
      }
    }
  }
} %}

{% do ADDON_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
{% endfor %}
{% endif %}

@@ -23,6 +23,13 @@ fi
# Define a banner to separate sections
banner="========================================================================="

fleet_api() {
  local QUERYPATH=$1
  shift

  curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
}
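
The wrapper centralizes the curl flags (config-file auth, retries, --fail) so callers only supply the Fleet API route plus any extra curl options; a usage sketch built from routes that appear later in this file:

# Sketch: GET and POST through fleet_api.
fleet_api "agent_policies" | jq -r '.items[].id'
fleet_api "package_policies/upgrade" -XPOST \
  -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
  -d '{"packagePolicyIds":["example-id"]}'   # id illustrative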

elastic_fleet_integration_check() {

AGENT_POLICY=$1
@@ -39,7 +46,9 @@ elastic_fleet_integration_create() {

JSON_STRING=$1

curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -d "$JSON_STRING"; then
  return 1
fi
}

@@ -56,7 +65,10 @@ elastic_fleet_integration_remove() {
'{"packagePolicyIds":[$INTEGRATIONID]}'
)

curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "package_policies/delete" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo "Error: Unable to delete '$NAME' from '$AGENT_POLICY'"
  return 1
fi
}

elastic_fleet_integration_update() {
@@ -65,7 +77,9 @@ elastic_fleet_integration_update() {

JSON_STRING=$2

curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPUT -d "$JSON_STRING"; then
  return 1
fi
}

elastic_fleet_integration_policy_upgrade() {
@@ -77,101 +91,116 @@ elastic_fleet_integration_policy_upgrade() {
'{"packagePolicyIds":[$INTEGRATIONID]}'
)

curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "package_policies/upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  return 1
fi
}


elastic_fleet_package_version_check() {
PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'

if output=$(fleet_api "epm/packages/$PACKAGE"); then
  echo "$output" | jq -r '.item.version'
else
  echo "Error: Failed to get current package version for '$PACKAGE'"
  return 1
fi
}

elastic_fleet_package_latest_version_check() {
PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.latestVersion'
if output=$(fleet_api "epm/packages/$PACKAGE"); then
  if version=$(jq -e -r '.item.latestVersion' <<< $output); then
    echo "$version"
  fi
else
  echo "Error: Failed to get latest version for '$PACKAGE'"
  return 1
fi
}

elastic_fleet_package_install() {
PKG=$1
VERSION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION"
if ! fleet_api "epm/packages/$PKG/$VERSION" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}'; then
  return 1
fi
}

elastic_fleet_bulk_package_install() {
BULK_PKG_LIST=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$1 "localhost:5601/api/fleet/epm/packages/_bulk"
}

elastic_fleet_package_is_installed() {
PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
}

elastic_fleet_installed_packages() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=500"
}

elastic_fleet_agent_policy_ids() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].id
if [ $? -ne 0 ]; then
  echo "Error: Failed to retrieve agent policies."
  exit 1
if ! fleet_api "epm/packages/_bulk" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$BULK_PKG_LIST; then
  return 1
fi
}

elastic_fleet_agent_policy_names() {
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].name
if [ $? -ne 0 ]; then
elastic_fleet_installed_packages() {
if ! fleet_api "epm/packages/installed?perPage=500"; then
  return 1
fi
}

elastic_fleet_agent_policy_ids() {
if output=$(fleet_api "agent_policies"); then
  echo "$output" | jq -r .items[].id
else
  echo "Error: Failed to retrieve agent policies."
  exit 1
  return 1
fi
}

elastic_fleet_integration_policy_names() {
AGENT_POLICY=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r .item.package_policies[].name
if [ $? -ne 0 ]; then
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
  echo "$output" | jq -r .item.package_policies[].name
else
  echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
  exit 1
  return 1
fi
}

elastic_fleet_integration_policy_package_name() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
if [ $? -ne 0 ]; then
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
  echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
else
  echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
  exit 1
  return 1
fi
}

elastic_fleet_integration_policy_package_version() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version'
if [ $? -ne 0 ]; then
  echo "Error: Failed to retrieve package version for '$INTEGRATION' in '$AGENT_POLICY'."
  exit 1

if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
  if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< "$output"); then
    echo "$version"
  fi
else
  echo "Error: Failed to retrieve integration version for '$INTEGRATION' in policy '$AGENT_POLICY'"
  return 1
fi
}

elastic_fleet_integration_id() {
AGENT_POLICY=$1
INTEGRATION=$2
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
if [ $? -ne 0 ]; then
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
  echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
else
  echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
  exit 1
  return 1
fi
}

elastic_fleet_integration_policy_dryrun_upgrade() {
INTEGRATION_ID=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -L -X POST "localhost:5601/api/fleet/package_policies/upgrade/dryrun" -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"
if [ $? -ne 0 ]; then
if ! fleet_api "package_policies/upgrade/dryrun" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -XPOST -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"; then
  echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
  exit 1
  return 1
fi
}

@@ -180,25 +209,18 @@ elastic_fleet_policy_create() {
NAME=$1
DESC=$2
FLEETSERVER=$3
TIMEOUT=$4
TIMEOUT=$4

JSON_STRING=$( jq -n \
--arg NAME "$NAME" \
--arg DESC "$DESC" \
--arg TIMEOUT $TIMEOUT \
--arg FLEETSERVER "$FLEETSERVER" \
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
)
--arg NAME "$NAME" \
--arg DESC "$DESC" \
--arg TIMEOUT $TIMEOUT \
--arg FLEETSERVER "$FLEETSERVER" \
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
)
# Create Fleet Policy
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  return 1
fi

}

elastic_fleet_policy_update() {

POLICYID=$1
JSON_STRING=$2

curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}

@@ -8,6 +8,7 @@

. /usr/sbin/so-elastic-fleet-common

ERROR=false
# Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do
@@ -15,9 +16,20 @@ do
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
  printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
  elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
  if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
    echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
    ERROR=true
    continue
  fi
else
  printf "\n\nIntegration does not exist - Creating integration\n"
  elastic_fleet_integration_create "@$INTEGRATION"
  if ! elastic_fleet_integration_create "@$INTEGRATION"; then
    echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
    ERROR=true
    continue
  fi
fi
done
if [[ "$ERROR" == "true" ]]; then
  exit 1
fi

@@ -25,5 +25,9 @@ for POLICYNAME in $POLICY; do
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)

# Now update the integration policy using the modified JSON
elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"; then
  # exit 1 on failure to update fleet integration policies, let salt handle retries
  echo "Failed to update $POLICYNAME.."
  exit 1
fi
done
|
||||
@@ -13,11 +13,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
|
||||
/usr/sbin/so-elastic-fleet-package-upgrade
|
||||
|
||||
# Second, update Fleet Server policies
|
||||
/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
|
||||
/usr/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
|
||||
|
||||
# Third, configure Elastic Defend Integration seperately
|
||||
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
|
||||
|
||||
# Initial Endpoints
|
||||
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
|
||||
do
|
||||
@@ -25,10 +24,18 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
|
||||
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
|
||||
if [ -n "$INTEGRATION_ID" ]; then
|
||||
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
||||
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
|
||||
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
|
||||
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
|
||||
RETURN_CODE=1
|
||||
continue
|
||||
fi
|
||||
else
|
||||
printf "\n\nIntegration does not exist - Creating integration\n"
|
||||
elastic_fleet_integration_create "@$INTEGRATION"
|
||||
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
|
||||
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
|
||||
RETURN_CODE=1
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
@@ -39,10 +46,18 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
|
||||
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
|
||||
if [ -n "$INTEGRATION_ID" ]; then
|
||||
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
||||
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
|
||||
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
|
||||
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
|
||||
RETURN_CODE=1
|
||||
continue
|
||||
fi
|
||||
else
|
||||
printf "\n\nIntegration does not exist - Creating integration\n"
|
||||
elastic_fleet_integration_create "@$INTEGRATION"
|
||||
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
|
||||
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
|
||||
RETURN_CODE=1
|
||||
continue
|
||||
fi
|
||||
fi
|
||||
done
|
||||
if [[ "$RETURN_CODE" != "1" ]]; then
|
||||
@@ -56,11 +71,19 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
    elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
    if [ -n "$INTEGRATION_ID" ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      if [ "$NAME" != "elasticsearch-logs" ]; then
        elastic_fleet_integration_create "@$INTEGRATION"
        if ! elastic_fleet_integration_create "@$INTEGRATION"; then
          echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
          RETURN_CODE=1
          continue
        fi
      fi
    fi
  done
@@ -77,11 +100,19 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
      elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
      if [ -n "$INTEGRATION_ID" ]; then
        printf "\n\nIntegration $NAME exists - Updating integration\n"
        elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
        if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
          echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
          RETURN_CODE=1
          continue
        fi
      else
        printf "\n\nIntegration does not exist - Creating integration\n"
        if [ "$NAME" != "elasticsearch-logs" ]; then
          elastic_fleet_integration_create "@$INTEGRATION"
          if ! elastic_fleet_integration_create "@$INTEGRATION"; then
            echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
            RETURN_CODE=1
            continue
          fi
        fi
      fi
    fi

@@ -24,23 +24,39 @@ fi

default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})

ERROR=false
for AGENT_POLICY in $agent_policies; do
  integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
  if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
    # this script upgrades default integration packages, exit 1 and let salt handle retrying
    exit 1
  fi
  for INTEGRATION in $integrations; do
    if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
      # Get package name so we know what package to look for when checking the current and latest available version
      PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
      if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
        exit 1
      fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
      if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
{%- endif %}
        # Get currently installed version of package
        PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")

        # Get latest available version of package
        AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")
        attempt=0
        max_attempts=3
        while [ $attempt -lt $max_attempts ]; do
          if PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION") && AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME"); then
            break
          fi
          attempt=$((attempt + 1))
        done
        if [ $attempt -eq $max_attempts ]; then
          echo "Error: Failed getting PACKAGE_VERSION or AVAILABLE_VERSION for package $PACKAGE_NAME"
          exit 1
        fi
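The while loop above retries both version lookups up to three times before treating the failure as fatal. The same bounded-retry skeleton, generalized; `fetch_value` is a hypothetical stand-in for the Fleet helpers, and the `sleep` between attempts is an addition not present in the script:

# Sketch: retry a flaky command up to max_attempts times, then fail loudly.
attempt=0
max_attempts=3
while [ $attempt -lt $max_attempts ]; do
  if RESULT=$(fetch_value); then
    break
  fi
  attempt=$((attempt + 1))
  sleep 1   # back off briefly between attempts (assumption, not in the original)
done
if [ $attempt -eq $max_attempts ]; then
  echo "Error: fetch_value failed after $max_attempts attempts"
  exit 1
fi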

        # Get integration ID
        INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
        if ! INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION"); then
          exit 1
        fi

        if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
          # Dry run of the upgrade
@@ -48,20 +64,23 @@ for AGENT_POLICY in $agent_policies; do
          echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
          echo "Upgrading $INTEGRATION..."
          echo "Starting dry run..."
          DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
          if ! DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then
            exit 1
          fi
          DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)

          # If no errors with dry run, proceed with actual upgrade
          if [[ "$DRYRUN_ERRORS" == "false" ]]; then
            echo "No errors detected. Proceeding with upgrade..."
            elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
            if [ $? -ne 0 ]; then
            if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
              echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
              exit 1
              ERROR=true
              continue
            fi
          else
            echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
            exit 1
            ERROR=true
            continue
          fi
        fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
@@ -70,4 +89,7 @@ for AGENT_POLICY in $agent_policies; do
    fi
  done
done
if [[ "$ERROR" == "true" ]]; then
  exit 1
fi
echo

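The upgrade path above commits a policy upgrade only after Fleet's dry-run endpoint reports hasErrors == false, and the new code flags failures with ERROR=true so the remaining policies still get processed. A condensed sketch of that gate, assuming the same repo helpers:

# Sketch: gate the real upgrade behind a clean dry run; aggregate failures.
if DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then
  if [[ "$(echo "$DRYRUN_OUTPUT" | jq '.[].hasErrors')" == "false" ]]; then
    elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID" || ERROR=true
  else
    ERROR=true   # dry-run problems are recorded, not fatal to the loop
  fi
else
  ERROR=true
fi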
@@ -19,6 +19,7 @@ BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json

PENDING_UPDATE=false

@@ -61,9 +62,17 @@ default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.l
in_use_integrations=()

for AGENT_POLICY in $agent_policies; do
  integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")

  if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
    # skip the agent policy if we can't get required info, let salt retry. Integrations loaded by this script are non-default integrations.
    echo "Skipping $AGENT_POLICY.. "
    continue
  fi
  for INTEGRATION in $integrations; do
    PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
    if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
      echo "Not adding $INTEGRATION, couldn't get package name"
      continue
    fi
    # non-default integrations that are in-use in any policy
    if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
      in_use_integrations+=("$PACKAGE_NAME")
@@ -147,14 +156,38 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
done <<< "$(jq -c '.packages[]' "$INSTALLED_PACKAGE_LIST")"

if [ "$PENDING_UPDATE" = true ]; then
  # Run bulk install of packages
  elastic_fleet_bulk_package_install $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_OUTPUT
  # Run chunked install of packages
  echo "" > $BULK_INSTALL_OUTPUT
  pkg_group=1
  pkg_filename="${BULK_INSTALL_PACKAGE_LIST%.json}"

  jq -c '.packages | _nwise(25)' $BULK_INSTALL_PACKAGE_LIST | while read -r line; do
    echo "$line" | jq '{ "packages": . }' > "${pkg_filename}_${pkg_group}.json"
    pkg_group=$((pkg_group + 1))
  done

  for file in "${pkg_filename}_"*.json; do
    [ -e "$file" ] || continue
    if ! elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT; then
      # integrations loaded by this script are non-essential and shouldn't cause an exit; skip them for now, the next highstate run can retry
      echo "Failed to complete a chunk of bulk package installs -- $file "
      continue
    fi
  done
  # cleanup any temp files for chunked package install
  rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
else
  echo "Elastic integrations don't appear to need installation/updating..."
fi
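jq's `_nwise(25)` is what bounds each bulk-install request: it slices the `.packages` array into groups of at most 25, and each group is rewrapped as its own `{"packages": [...]}` document. A self-contained sketch with a hypothetical input file name (note the counter increments inside a pipeline subshell, which is fine here because it is only read within the loop):

# Sketch: split a bulk package list into numbered 25-package chunks.
# packages.json is a hypothetical input of the form {"packages":[ ... ]}
pkg_group=1
jq -c '.packages | _nwise(25)' packages.json | while read -r line; do
  echo "$line" | jq '{ "packages": . }' > "packages_${pkg_group}.json"
  pkg_group=$((pkg_group + 1))
done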
# Write out file for generating index/component/ilm templates
latest_installed_package_list=$(elastic_fleet_installed_packages)
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
if latest_installed_package_list=$(elastic_fleet_installed_packages); then
  echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
fi
if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
  # Refresh installed component template list
  latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
  echo $latest_component_templates_list > $COMPONENT_TEMPLATES
fi

else
  # This is the installation of add-on integrations and the upgrade of existing integrations. Exiting without error; the next highstate will attempt to re-run.

@@ -8,6 +8,27 @@

. /usr/sbin/so-common

FORCE_UPDATE=false
UPDATE_CERTS=false

while [[ $# -gt 0 ]]; do
  case $1 in
    -f|--force)
      FORCE_UPDATE=true
      shift
      ;;
    -c|--certs)
      UPDATE_CERTS=true
      shift
      ;;
    *)
      echo "Unknown option $1"
      echo "Usage: $0 [-f|--force] [-c|--certs]"
      exit 1
      ;;
  esac
done

# Only run on Managers
if ! is_manager_node; then
  printf "Not a Manager Node... Exiting"
@@ -15,22 +36,74 @@ if ! is_manager_node; then
fi

function update_logstash_outputs() {
  # Generate updated JSON payload
  JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
  if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
    SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
    LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
    LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
    LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
    if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Reuse existing secret
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --argjson SECRETS "$SECRETS" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
      else
        # Update certs, creating new secret
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg LOGSTASHKEY "$LOGSTASHKEY" \
          --arg LOGSTASHCRT "$LOGSTASHCRT" \
          --arg LOGSTASHCA "$LOGSTASHCA" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
      fi
    else
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Reuse existing ssl config
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
      else
        # Update ssl config
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg LOGSTASHKEY "$LOGSTASHKEY" \
          --arg LOGSTASHCRT "$LOGSTASHCRT" \
          --arg LOGSTASHCA "$LOGSTASHCA" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
      fi
    fi
  fi

  # Update Logstash Outputs
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
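The branching in update_logstash_outputs hinges on `jq -er '.item.secrets'`: the -e flag makes jq exit non-zero when the path resolves to null or is missing, so the if-condition itself detects whether Fleet stores the key as a secret. A tiny sketch of that probe against a hypothetical saved policy file:

# Sketch: branch on whether the Fleet output policy carries a secrets object.
# policy.json is a hypothetical saved copy of the output policy response.
if SECRETS=$(jq -er '.item.secrets' policy.json 2>/dev/null); then
  echo "secrets enabled: reusing $SECRETS"
else
  echo "no secrets object: falling back to the inline ssl config"
fi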
function update_kafka_outputs() {
  # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
  SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')

  JSON_STRING=$(jq -n \
    --arg UPDATEDLIST "$NEW_LIST_JSON" \
    --argjson SSL_CONFIG "$SSL_CONFIG" \
    '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
  # Update Kafka outputs
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
  if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
    SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
    if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
      # Update policy when fleet has secrets enabled
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        --argjson SECRETS "$SECRETS" \
        '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
    else
      # Update policy when fleet has secrets disabled or policy hasn't been force updated
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
    fi
    # Update Kafka outputs
    curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
  else
    printf "Failed to get current Kafka output policy..."
    exit 1
  fi
}

{% if GLOBALS.pipeline == "KAFKA" %}
@@ -124,7 +197,7 @@ NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "$
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

# Compare the current & new list of outputs - if different, update the Logstash outputs
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
  printf "\nHashes match - no update needed.\n"
  printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"

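The hash comparison is what makes the script idempotent: the desired host list is hashed with SHA-1 and compared to the stored hash, and the Fleet API is only touched on a mismatch or an explicit --force. Sketch of the same short-circuit:

# Sketch: skip the Fleet API call when nothing changed, unless forced.
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
  echo "Hashes match - no update needed."
  exit 0
fi
update_logstash_outputs   # repo function shown earlier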
@@ -10,8 +10,16 @@

{%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Setting up {{ PACKAGE }} package..."
VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}")
elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
if VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}"); then
  if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then
    # packages loaded by this script should never fail to install and are REQUIRED before an installation of SO can be considered successful
    echo -e "\nERROR: Failed to install default integration package -- {{ PACKAGE }} $VERSION"
    exit 1
  fi
else
  echo -e "\nERROR: Failed to get version information for integration {{ PACKAGE }}"
  exit 1
fi
echo
{%- endfor %}
echo

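Because the guard sits inside the `{%- for PACKAGE in SUPPORTED_PACKAGES %}` loop, the rendered script contains one independent install block per package. Rendered output for a single, hypothetical package name would look roughly like:

# Sketch: what one loop iteration renders to ("endpoint" is a placeholder name).
echo "Setting up endpoint package..."
if VERSION=$(elastic_fleet_package_version_check "endpoint"); then
  if ! elastic_fleet_package_install "endpoint" "$VERSION"; then
    echo -e "\nERROR: Failed to install default integration package -- endpoint $VERSION"
    exit 1
  fi
else
  echo -e "\nERROR: Failed to get version information for integration endpoint"
  exit 1
fi
echo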
@@ -10,8 +10,15 @@

{%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Upgrading {{ PACKAGE }} package..."
VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}")
elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
if VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}"); then
  if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then
    # exit 1 on failure to upgrade a default package, allow salt to handle retries
    echo -e "\nERROR: Failed to upgrade {{ PACKAGE }} to version: $VERSION"
    exit 1
  fi
else
  echo -e "\nERROR: Failed to get version information for integration {{ PACKAGE }}"
fi
echo
{%- endfor %}
echo

@@ -23,18 +23,17 @@ if [[ "$RETURN_CODE" != "0" ]]; then
  exit 1
fi

ALIASES=".fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
for ALIAS in ${ALIASES}
do
ALIASES=(.fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest)
for ALIAS in "${ALIASES[@]}"; do
  # Get all concrete indices from alias
  INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')

  # Delete all resolved indices
  for INDX in ${INDXS}
  do
  if INDXS_RAW=$(curl -sK /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then
    INDXS=$(echo "$INDXS_RAW" | jq -r '.aliases[].indices[]')
    # Delete all resolved indices
    for INDX in ${INDXS}; do
      status "Deleting $INDX"
      curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
    done
    done
  fi
done

# Restarting Kibana...
@@ -51,22 +50,61 @@ if [[ "$RETURN_CODE" != "0" ]]; then
fi

printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
if ESTOKEN_RAW=$(fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  ESTOKEN=$(echo "$ESTOKEN_RAW" | jq -r .value)
else
  echo -e "\nFailed to create ES token..."
  exit 1
fi

### Create Outputs, Fleet Policy and Fleet URLs ###
# Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in $INTCA)
JSON_STRING=$( jq -n \
  --arg ESCACRT "$ESCACRT" \
  '{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
ESCACRT=$(openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
JSON_STRING=$(jq -n \
  --arg ESCACRT "$ESCACRT" \
  '{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}')

if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to create so-manager_elasticsearch policy..."
  exit 1
fi
printf "\n\n"

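The replacement output drops the embedded CA PEM in favor of `ca_trusted_fingerprint`, which Elastic expects to be the uppercase hex SHA-256 digest of the DER-encoded CA certificate. A worked example of the same computation, with the repo's intermediate CA path standing in for $INTCA:

# Sketch: compute the CA trusted fingerprint for a Fleet Elasticsearch output.
openssl x509 -in /etc/pki/tls/certs/intca.crt -outform DER \
  | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]'
# Prints 64 uppercase hex characters (the value itself depends on the cert).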
# so-manager_elasticsearch should exist and be disabled. Now update it before checking that it's the only default policy
MANAGER_OUTPUT_ENABLED=$(echo "$JSON_STRING" | jq 'del(.id) | .is_default = true | .is_default_monitoring = true')
if ! curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$MANAGER_OUTPUT_ENABLED"; then
  echo -e "\nFailed to update so-manager_elasticsearch"
  exit 1
fi

# At this point there should only be two output policies: fleet-default-output & so-manager_elasticsearch
status "Verifying so-manager_elasticsearch policy is configured as the current default"

# Grab the fleet-default-output policy instead of so-manager_elasticsearch, because a weird state can exist where both fleet-default-output & so-manager_elasticsearch are set as the active default output for logs/metrics, resulting in logs not ingesting on import/eval nodes
if DEFAULTPOLICY=$(fleet_api "outputs/fleet-default-output"); then
  fleet_default=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default')
  fleet_default_monitoring=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default_monitoring')
  # Check that fleet-default-output isn't configured as a default for anything (both variables return false)
  if [[ $fleet_default == "false" ]] && [[ $fleet_default_monitoring == "false" ]]; then
    echo -e "\nso-manager_elasticsearch is configured as the current default policy..."
  else
    echo -e "\nVerification of so-manager_elasticsearch policy failed... The default 'fleet-default-output' output is still active..."
    exit 1
  fi
else
  # fleet-default-output is created automatically by fleet when started. It should always exist on any installation type
  echo -e "\nDefault fleet-default-output policy doesn't exist...\n"
  exit 1
fi

# Create the Manager Fleet Server Host Agent Policy
# This has to be done while the Elasticsearch Output is set to the default Output
printf "Create Manager Fleet Server Policy...\n"
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
if ! elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"; then
  echo -e "\nFailed to create Manager fleet server policy..."
  exit 1
fi

# Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
@@ -74,7 +112,10 @@ UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)

# Add the Fleet Server Integration to the new Fleet Policy
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
if ! elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"; then
  echo -e "\nFailed to create Fleet server integration for Manager.."
  exit 1
fi

# Now we can create the Logstash Output and set it to be the default Output
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
@@ -86,9 +127,12 @@ JSON_STRING=$( jq -n \
  --arg LOGSTASHCRT "$LOGSTASHCRT" \
  --arg LOGSTASHKEY "$LOGSTASHKEY" \
  --arg LOGSTASHCA "$LOGSTASHCA" \
  '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
  '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to create logstash fleet output"
  exit 1
fi
printf "\n\n"
{%- endif %}

@@ -106,7 +150,10 @@ else
fi

## This array replaces whatever URLs are currently configured
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/fleet_server_hosts" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "fleet_server_hosts" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to add manager fleet URL"
  exit 1
fi
printf "\n\n"

### Create Policies & Associated Integration Configuration ###
@@ -117,13 +164,22 @@ printf "\n\n"
/usr/sbin/so-elasticsearch-templates-load

# Initial Endpoints Policy
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
if ! elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"; then
  echo -e "\nFailed to create endpoints-initial policy..."
  exit 1
fi

# Grid Nodes - General Policy
elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"
if ! elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"; then
  echo -e "\nFailed to create so-grid-nodes_general policy..."
  exit 1
fi

# Grid Nodes - Heavy Node Policy
elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"
if ! elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"; then
  echo -e "\nFailed to create so-grid-nodes_heavy policy..."
  exit 1
fi

# Load Integrations for default policies
so-elastic-fleet-integration-policy-load
@@ -135,14 +191,34 @@ JSON_STRING=$( jq -n \
  '{"name":$NAME,"host":$URL,"is_default":true}'
)

curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "agent_download_sources" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to update Elastic Agent artifact URL"
  exit 1
fi

### Finalization ###

# Query for Enrollment Tokens for default policies
ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
GRIDNODESENROLLMENTOKENGENERAL=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
GRIDNODESENROLLMENTOKENHEAVY=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
if ENDPOINTSENROLLMENTOKEN_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  ENDPOINTSENROLLMENTOKEN=$(echo "$ENDPOINTSENROLLMENTOKEN_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
else
  echo -e "\nFailed to query for Endpoints enrollment token"
  exit 1
fi

if GRIDNODESENROLLMENTOKENGENERAL_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  GRIDNODESENROLLMENTOKENGENERAL=$(echo "$GRIDNODESENROLLMENTOKENGENERAL_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
else
  echo -e "\nFailed to query for Grid nodes - General enrollment token"
  exit 1
fi

if GRIDNODESENROLLMENTOKENHEAVY_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  GRIDNODESENROLLMENTOKENHEAVY=$(echo "$GRIDNODESENROLLMENTOKENHEAVY_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
else
  echo -e "\nFailed to query for Grid nodes - Heavy enrollment token"
  exit 1
fi

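The three lookups above each hit the same enrollment_api_keys endpoint. A sketch that fetches the list once and extracts all three tokens, assuming the same fleet_api helper and the same jq filters:

# Sketch: one API call, three jq extractions (same filters as above).
if KEYS=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  ENDPOINTSENROLLMENTOKEN=$(echo "$KEYS" | jq -r -c '.list[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
  GRIDNODESENROLLMENTOKENGENERAL=$(echo "$KEYS" | jq -r -c '.list[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
  GRIDNODESENROLLMENTOKENHEAVY=$(echo "$KEYS" | jq -r -c '.list[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
else
  echo -e "\nFailed to query for enrollment tokens"
  exit 1
fi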
# Store needed data in minion pillar
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls

@@ -5,46 +5,78 @@
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-managerhype'] %}

. /usr/sbin/so-common

force=false
while [[ $# -gt 0 ]]; do
  case $1 in
    -f|--force)
      force=true
      shift
      ;;
    *)
      echo "Unknown option $1"
      echo "Usage: $0 [-f|--force]"
      exit 1
      ;;
  esac
done

# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?

if [[ "$RETURN_CODE" != "0" ]]; then
  printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
  exit 1
  echo -e "\nKibana API not accessible, can't setup Elastic Fleet output policy for Kafka...\n"
  exit 1
fi

output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"

if ! echo "$output" | grep -q "so-manager_kafka"; then
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"
if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
  # Create a new output policy for Kafka. Default is disabled 'is_default: false & is_default_monitoring: false'
  JSON_STRING=$( jq -n \
    --arg KAFKACRT "$KAFKACRT" \
    --arg KAFKAKEY "$KAFKAKEY" \
    --arg KAFKACA "$KAFKACA" \
    --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
    --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
    '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 10 }, "topics":[{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
  )
  curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
  refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)

  if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
    echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
    --arg KAFKACRT "$KAFKACRT" \
    --arg KAFKAKEY "$KAFKAKEY" \
    --arg KAFKACA "$KAFKACA" \
    --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
    --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
    '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
  )
  if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
    echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
    exit 1
  else
    echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
    exit 0
  fi
elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null) && [[ "$force" == "true" ]]; then
  # force an update to Kafka policy. Keep the current value of Kafka output policy (enabled/disabled).
  ENABLED_DISABLED=$(echo "$kafka_output" | jq -e .item.is_default)
  HOSTS=$(echo "$kafka_output" | jq -r '.item.hosts')
  JSON_STRING=$( jq -n \
    --arg KAFKACRT "$KAFKACRT" \
    --arg KAFKAKEY "$KAFKAKEY" \
    --arg KAFKACA "$KAFKACA" \
    --arg ENABLED_DISABLED "$ENABLED_DISABLED" \
    --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
    --argjson HOSTS "$HOSTS" \
    '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
  )
  if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
    echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
    exit 1
  elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
    echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
  else
    echo -e "\nForced update to Elastic Fleet output policy for Kafka...\n"
  fi

elif echo "$output" | grep -q "so-manager_kafka"; then
else
  echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi
{% else %}

|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
|
||||
{% if grains.id.split('_') | last in ['manager','managerhype','managersearch','standalone'] %}
|
||||
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
|
||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
|
||||
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
elasticsearch:
|
||||
enabled: false
|
||||
version: 8.17.3
|
||||
version: 8.18.8
|
||||
index_clean: true
|
||||
config:
|
||||
action:
|
||||
@@ -284,6 +284,86 @@ elasticsearch:
|
||||
hot:
|
||||
actions: {}
|
||||
min_age: 0ms
|
||||
so-assistant-chat:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
- assistant-chat-mappings
|
||||
- assistant-chat-settings
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- so-assistant-chat*
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
dynamic_templates:
|
||||
- strings_as_keyword:
|
||||
mapping:
|
||||
ignore_above: 1024
|
||||
type: keyword
|
||||
match_mapping_type: string
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-assistant-chat-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 1500
|
||||
number_of_replicas: 0
|
||||
number_of_shards: 1
|
||||
refresh_interval: 1s
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
actions: {}
|
||||
min_age: 0ms
|
||||
so-assistant-session:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
- assistant-session-mappings
|
||||
- assistant-session-settings
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- so-assistant-session*
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
dynamic_templates:
|
||||
- strings_as_keyword:
|
||||
mapping:
|
||||
ignore_above: 1024
|
||||
type: keyword
|
||||
match_mapping_type: string
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-assistant-session-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 1500
|
||||
number_of_replicas: 0
|
||||
number_of_shards: 1
|
||||
refresh_interval: 1s
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
actions: {}
|
||||
min_age: 0ms
|
||||
so-endgame:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
@@ -567,6 +647,7 @@ elasticsearch:
          - common-settings
          - common-dynamic-mappings
          - winlog-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -1242,6 +1323,68 @@ elasticsearch:
              set_priority:
                priority: 50
            min_age: 30d
    so-elastic-agent-monitor:
      index_sorting: false
      index_template:
        composed_of:
          - event-mappings
          - so-elastic-agent-monitor
          - so-fleet_integrations.ip_mappings-1
          - so-fleet_globals-1
          - so-fleet_agent_id_verification-1
        data_stream:
          allow_custom_routing: false
          hidden: false
        index_patterns:
          - logs-agentmonitor-*
        priority: 501
        template:
          mappings:
            _meta:
              managed: true
              managed_by: security_onion
              package:
                name: elastic_agent
          settings:
            index:
              lifecycle:
                name: so-elastic-agent-monitor-logs
              mapping:
                total_fields:
                  limit: 5000
              number_of_replicas: 0
              sort:
                field: '@timestamp'
                order: desc
      policy:
        _meta:
          managed: true
          managed_by: security_onion
          package:
            name: elastic_agent
        phases:
          cold:
            actions:
              set_priority:
                priority: 0
            min_age: 60d
          delete:
            actions:
              delete: {}
            min_age: 365d
          hot:
            actions:
              rollover:
                max_age: 30d
                max_primary_shard_size: 50gb
              set_priority:
                priority: 100
            min_age: 0ms
          warm:
            actions:
              set_priority:
                priority: 50
            min_age: 30d
    so-logs-elastic_agent_x_apm_server:
      index_sorting: false
      index_template:
@@ -1848,6 +1991,70 @@ elasticsearch:
              set_priority:
                priority: 50
            min_age: 30d
    so-logs-elasticsearch_x_server:
      index_sorting: false
      index_template:
        composed_of:
          - logs-elasticsearch.server@package
          - logs-elasticsearch.server@custom
          - so-fleet_integrations.ip_mappings-1
          - so-fleet_globals-1
          - so-fleet_agent_id_verification-1
        data_stream:
          allow_custom_routing: false
          hidden: false
        ignore_missing_component_templates:
          - logs-elasticsearch.server@custom
        index_patterns:
          - logs-elasticsearch.server-*
        priority: 501
        template:
          mappings:
            _meta:
              managed: true
              managed_by: security_onion
              package:
                name: elastic_agent
          settings:
            index:
              lifecycle:
                name: so-logs-elasticsearch.server-logs
              mapping:
                total_fields:
                  limit: 5000
              number_of_replicas: 0
              sort:
                field: '@timestamp'
                order: desc
      policy:
        _meta:
          managed: true
          managed_by: security_onion
          package:
            name: elastic_agent
        phases:
          cold:
            actions:
              set_priority:
                priority: 0
            min_age: 60d
          delete:
            actions:
              delete: {}
            min_age: 365d
          hot:
            actions:
              rollover:
                max_age: 30d
                max_primary_shard_size: 50gb
              set_priority:
                priority: 100
            min_age: 0ms
          warm:
            actions:
              set_priority:
                priority: 50
            min_age: 30d
    so-logs-endpoint_x_actions:
      index_sorting: false
      index_template:
@@ -3874,6 +4081,7 @@ elasticsearch:
          - vulnerability-mappings
          - common-settings
          - common-dynamic-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -3987,6 +4195,7 @@ elasticsearch:
          - vulnerability-mappings
          - common-settings
          - common-dynamic-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -4028,7 +4237,7 @@ elasticsearch:
          hot:
            actions:
              rollover:
                max_age: 1d
                max_age: 30d
                max_primary_shard_size: 50gb
              set_priority:
                priority: 100
@@ -4100,6 +4309,7 @@ elasticsearch:
          - vulnerability-mappings
          - common-settings
          - common-dynamic-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -4329,6 +4539,7 @@ elasticsearch:
          - zeek-mappings
          - common-settings
          - common-dynamic-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -4501,6 +4712,14 @@ elasticsearch:
            - data
            - remote_cluster_client
            - transform
    so-managerhype:
      config:
        node:
          roles:
            - master
            - data
            - remote_cluster_client
            - transform
    so-managersearch:
      config:
        node:

@@ -204,7 +204,7 @@ so-elasticsearch-roles-load:
      - docker_container: so-elasticsearch
      - file: elasticsearch_sbin_jinja

{% if grains.role in ['so-managersearch', 'so-manager'] %}
{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
  {% set ap = "absent" %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}

@@ -26,7 +26,7 @@
    {
      "geoip": {
        "field": "destination.ip",
        "target_field": "destination_geo",
        "target_field": "destination.as",
        "database_file": "GeoLite2-ASN.mmdb",
        "ignore_missing": true,
        "ignore_failure": true,
@@ -36,13 +36,17 @@
    {
      "geoip": {
        "field": "source.ip",
        "target_field": "source_geo",
        "target_field": "source.as",
        "database_file": "GeoLite2-ASN.mmdb",
        "ignore_missing": true,
        "ignore_failure": true,
        "properties": ["ip", "asn", "organization_name", "network"]
      }
    },
    { "rename": { "field": "destination.as.organization_name", "target_field": "destination.as.organization.name", "ignore_failure": true, "ignore_missing": true } },
    { "rename": { "field": "source.as.organization_name", "target_field": "source.as.organization.name", "ignore_failure": true, "ignore_missing": true } },
    { "rename": { "field": "destination.as.asn", "target_field": "destination.as.number", "ignore_failure": true, "ignore_missing": true } },
    { "rename": { "field": "source.as.asn", "target_field": "source.as.number", "ignore_failure": true, "ignore_missing": true } },
    { "set": { "if": "ctx.event?.severity == 1", "field": "event.severity_label", "value": "low", "override": true } },
    { "set": { "if": "ctx.event?.severity == 2", "field": "event.severity_label", "value": "medium", "override": true } },
    { "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } },

salt/elasticsearch/files/ingest/common.ip_validation (new file, 22 lines)
@@ -0,0 +1,22 @@
{
  "processors": [
    {
      "convert": {
        "field": "_ingest._value",
        "type": "ip",
        "target_field": "_ingest._temp_ip",
        "ignore_failure": true
      }
    },
    {
      "append": {
        "field": "temp._valid_ips",
        "allow_duplicates": false,
        "value": [
          "{{{_ingest._temp_ip}}}"
        ],
        "ignore_failure": true
      }
    }
  ]
}
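common.ip_validation is written to be invoked once per array element from a foreach processor: the convert step only populates `_ingest._temp_ip` for values that parse as IPs, and the append step collects the converted values into temp._valid_ips. A hedged way to exercise it via the _simulate API once the pipeline is loaded; the foreach wrapper below is illustrative, not quoted from the repo:

# Sketch: dry-run the pipeline by fanning out over an IP array.
curl -sK /opt/so/conf/kibana/curl.config -k -L -H "Content-Type: application/json" \
  "https://localhost:9200/_ingest/pipeline/_simulate" -d '
{
  "pipeline": {
    "processors": [
      { "foreach": { "field": "host.ip", "ignore_missing": true,
                     "processor": { "pipeline": { "name": "common.ip_validation" } } } }
    ]
  },
  "docs": [ { "_source": { "host": { "ip": [ "10.0.0.1", "not-an-ip" ] } } } ]
}'
# Expect temp._valid_ips to include "10.0.0.1".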
salt/elasticsearch/files/ingest/elasticagent.monitor (new file, 36 lines)
@@ -0,0 +1,36 @@
{
  "processors": [
    {
      "set": {
        "field": "event.dataset",
        "value": "gridmetrics.agents",
        "ignore_failure": true
      }
    },
    {
      "set": {
        "field": "event.module",
        "value": "gridmetrics",
        "ignore_failure": true
      }
    },
    {
      "remove": {
        "field": [
          "host",
          "elastic_agent",
          "agent"
        ],
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "json": {
        "field": "message",
        "add_to_root": true,
        "ignore_failure": true
      }
    }
  ]
}
@@ -23,8 +23,9 @@
    { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
    { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
    { "set": { "if": "ctx?.metadata?.kafka != null", "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
    { "set": { "if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'", "field": "event.module", "value":"elasticsearch" }},
    {"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
    {"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint'","description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
    {"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
    { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
  ]
}

@@ -107,61 +107,61 @@
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-firewall",
        "name": "logs-pfsense.log-1.23.1-firewall",
        "if": "ctx.event.provider == 'filterlog'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-openvpn",
        "name": "logs-pfsense.log-1.23.1-openvpn",
        "if": "ctx.event.provider == 'openvpn'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-ipsec",
        "name": "logs-pfsense.log-1.23.1-ipsec",
        "if": "ctx.event.provider == 'charon'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-dhcp",
        "name": "logs-pfsense.log-1.23.1-dhcp",
        "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-unbound",
        "name": "logs-pfsense.log-1.23.1-unbound",
        "if": "ctx.event.provider == 'unbound'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-haproxy",
        "name": "logs-pfsense.log-1.23.1-haproxy",
        "if": "ctx.event.provider == 'haproxy'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-php-fpm",
        "name": "logs-pfsense.log-1.23.1-php-fpm",
        "if": "ctx.event.provider == 'php-fpm'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-squid",
        "name": "logs-pfsense.log-1.23.1-squid",
        "if": "ctx.event.provider == 'squid'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-snort",
        "name": "logs-pfsense.log-1.23.1-snort",
        "if": "ctx.event.provider == 'snort'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.21.0-suricata",
        "name": "logs-pfsense.log-1.23.1-suricata",
        "if": "ctx.event.provider == 'suricata'"
      }
    },
@@ -358,14 +358,6 @@
        "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
      }
    },
    {
      "remove": {
        "field": "event.original",
        "if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "name": "global@custom",
@@ -1,15 +1,79 @@
{
  "description" : "suricata.alert",
  "processors" : [
    { "set": { "if": "ctx.event?.imported != true", "field": "_index", "value": "logs-suricata.alerts-so" } },
    { "set": { "field": "tags","value": "alert" }},
    { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
    { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
    { "rename":{ "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
    { "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
    { "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
    { "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
    { "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } },
    { "pipeline": { "name": "common.nids" } }
  ]
  "description": "suricata.alert",
  "processors": [
    {
      "set": {
        "if": "ctx.event?.imported != true",
        "field": "_index",
        "value": "logs-suricata.alerts-so"
      }
    },
    {
      "set": {
        "field": "tags",
        "value": "alert"
      }
    },
    {
      "rename": {
        "field": "message2.alert",
        "target_field": "rule",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature",
        "target_field": "rule.name",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.ref",
        "target_field": "rule.version",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature_id",
        "target_field": "rule.uuid",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature_id",
        "target_field": "rule.signature",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.payload_printable",
        "target_field": "network.data.decoded",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "dissect": {
        "field": "rule.rule",
        "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "pipeline": {
        "name": "common.nids"
      }
    }
  ]
}
@@ -1,30 +1,155 @@
{
  "description" : "suricata.common",
  "processors" : [
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
    { "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
    { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
    { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
    { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
    { "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
    { "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
    { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
    { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
    { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
    { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
    { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
    { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
    { "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
    { "remove":{ "field": "agent", "ignore_failure": true } },
    {"append":{"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"ignore_failure":true}},
    {
      "script": {
        "source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
        "ignore_failure": false
      }
    },
    { "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } }
  ]
}
"description": "suricata.common",
|
||||
"processors": [
|
||||
{
|
||||
"json": {
|
||||
"field": "message",
|
||||
"target_field": "message2",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.pkt_src",
|
||||
"target_field": "network.packet_source",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.proto",
|
||||
"target_field": "network.transport",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.in_iface",
|
||||
"target_field": "observer.ingress.interface.name",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.flow_id",
|
||||
"target_field": "log.id.uid",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.src_ip",
|
||||
"target_field": "source.ip",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.src_port",
|
||||
"target_field": "source.port",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.dest_ip",
|
||||
"target_field": "destination.ip",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.dest_port",
|
||||
"target_field": "destination.port",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.vlan",
|
||||
"target_field": "network.vlan.id",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.community_id",
|
||||
"target_field": "network.community_id",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.xff",
|
||||
"target_field": "xff.ip",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "event.dataset",
|
||||
"value": "{{ message2.event_type }}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "observer.name",
|
||||
"value": "{{agent.name}}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "event.ingested",
|
||||
"value": "{{@timestamp}}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"date": {
|
||||
"field": "message2.timestamp",
|
||||
"target_field": "@timestamp",
|
||||
"formats": [
|
||||
"ISO8601",
|
||||
"UNIX"
|
||||
],
|
||||
"timezone": "UTC",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": "agent",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "related.ip",
|
||||
"value": [
|
||||
"{{source.ip}}",
|
||||
"{{destination.ip}}"
|
||||
],
|
||||
"allow_duplicates": false,
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
|
||||
"ignore_failure": false
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.capture_file",
|
||||
"target_field": "suricata.capture_file",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"if": "ctx?.event?.dataset != null",
|
||||
"name": "suricata.{{event.dataset}}"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
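
A pipeline like suricata.common is easiest to sanity-check with Elasticsearch's simulate API before shipping real traffic through it. A minimal sketch, assuming the pipeline has been loaded under the name suricata.common and that the sub-pipelines it calls (for example suricata.alert) exist; the sample event and its field values are invented for illustration:

    POST _ingest/pipeline/suricata.common/_simulate
    {
      "docs": [
        { "_source": { "message": "{\"timestamp\":\"2024-01-01T00:00:00.000000+0000\",\"event_type\":\"alert\",\"src_ip\":\"10.0.0.5\",\"src_port\":4444,\"dest_ip\":\"203.0.113.7\",\"dest_port\":80,\"proto\":\"TCP\",\"flow_id\":1}" } }
      ]
    }

With this input the json processor expands message into message2, the renames produce source.ip and destination.ip, and the isPrivate script should classify 10.0.0.5 under network.private_ip and 203.0.113.7 under network.public_ip before the event is routed to suricata.alert.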
|
||||
@@ -1,21 +1,136 @@
{
  "description" : "suricata.dns",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
    { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
    { "pipeline": { "name": "common" } }
  ]
}

{
  "description": "suricata.dns",
  "processors": [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.tx_id", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.id", "target_field": "dns.id", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
    { "pipeline": { "name": "suricata.dnsv3", "ignore_missing_pipeline": true, "if": "ctx?.dns?.version != null && ctx?.dns?.version == 3", "ignore_failure": true } },
    { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.opcode", "target_field": "dns.opcode", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
    { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
    { "pipeline": { "name": "common" } }
  ]
}
|
||||
salt/elasticsearch/files/ingest/suricata.dnsv3 (new file, 56 lines)
@@ -0,0 +1,56 @@
{
  "processors": [
    { "rename": { "field": "message2.dns.queries", "target_field": "dns.queries", "ignore_missing": true, "ignore_failure": true } },
    { "script": { "source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.name = ctx?.dns?.queries[0].rrname;\n}" } },
    { "script": { "source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.type_name = ctx?.dns?.queries[0].rrtype;\n}" } },
    { "foreach": { "field": "dns.queries", "processor": { "rename": { "field": "_ingest._value.rrname", "target_field": "_ingest._value.name", "ignore_missing": true } }, "ignore_failure": true } },
    { "foreach": { "field": "dns.queries", "processor": { "rename": { "field": "_ingest._value.rrtype", "target_field": "_ingest._value.type_name", "ignore_missing": true } }, "ignore_failure": true } },
    { "pipeline": { "name": "suricata.tld", "ignore_missing_pipeline": true, "if": "ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0", "ignore_failure": true } }
  ]
}
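
A quick worked example of what this pipeline does; the record is invented for illustration. Input:

    { "dns": { "queries": [ { "rrname": "www.example.com", "rrtype": "A" } ] } }

After the two scripts promote the first query and the foreach processors rename the array entries:

    { "dns": { "query": { "name": "www.example.com", "type_name": "A" }, "queries": [ { "name": "www.example.com", "type_name": "A" } ] } }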
|
||||
salt/elasticsearch/files/ingest/suricata.tld (new file, 52 lines)
@@ -0,0 +1,52 @@
{
  "processors": [
    { "script": { "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.top_level_domain = q.name.substring(q.name.lastIndexOf('.') + 1);\n }\n }\n}", "ignore_failure": true } },
    { "script": { "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.query_without_tld = q.name.substring(0, q.name.lastIndexOf('.'));\n }\n }\n}", "ignore_failure": true } },
    { "script": { "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.parent_domain = q.query_without_tld.substring(q.query_without_tld.lastIndexOf('.') + 1);\n }\n }\n}", "ignore_failure": true } },
    { "script": { "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.subdomain = q.query_without_tld.substring(0, q.query_without_tld.lastIndexOf('.'));\n }\n }\n}", "ignore_failure": true } },
    { "script": { "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null && q.top_level_domain != null) {\n q.highest_registered_domain = q.parent_domain + \".\" + q.top_level_domain;\n }\n }\n}", "ignore_failure": true } },
    { "script": { "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.subdomain != null) {\n q.subdomain_length = q.subdomain.length();\n }\n }\n}", "ignore_failure": true } },
    { "script": { "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null) {\n q.parent_domain_length = q.parent_domain.length();\n }\n }\n}", "ignore_failure": true } },
    { "script": { "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n q.remove('query_without_tld');\n }\n}", "ignore_failure": true } }
  ]
}
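
Note that this is a purely string-based split on the last dot, not a public-suffix lookup. A worked example with an invented name: for q.name = "mail.internal.example.com" the scripts derive

    {
      "top_level_domain": "com",
      "parent_domain": "example",
      "highest_registered_domain": "example.com",
      "subdomain": "mail.internal",
      "subdomain_length": 13,
      "parent_domain_length": 7
    }

and the intermediate query_without_tld field is removed at the end. For multi-part suffixes such as example.co.uk the highest_registered_domain comes out as "co.uk", which is worth keeping in mind when hunting on these fields.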
|
||||
salt/elasticsearch/files/ingest/zeek.analyzer (new file, 61 lines)
@@ -0,0 +1,61 @@
{
  "description": "zeek.analyzer",
  "processors": [
    { "set": { "field": "event.dataset", "value": "analyzer" } },
    { "remove": { "field": ["host"], "ignore_failure": true } },
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "set": { "field": "network.protocol", "copy_from": "message2.analyzer_name", "ignore_empty_value": true, "if": "ctx?.message2?.analyzer_kind == 'protocol'" } },
    { "set": { "field": "network.protocol", "copy_from": "message2.proto", "ignore_empty_value": true, "if": "ctx?.message2?.analyzer_kind != 'protocol'" } },
    { "lowercase": { "field": "network.protocol", "ignore_missing": true, "ignore_failure": true } },
    { "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } },
    { "pipeline": { "name": "zeek.common" } }
  ]
}
|
||||
@@ -24,6 +24,10 @@
{ "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } },
{ "rename": { "field": "message2.sensorname", "target_field": "observer.name", "ignore_missing": true } },
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4l", "target_field": "hash.ja4l", "ignore_missing" : true, "if": "ctx.message2?.ja4l != null && ctx.message2.ja4l.length() > 0" }},
{ "rename": { "field": "message2.ja4ls", "target_field": "hash.ja4ls", "ignore_missing" : true, "if": "ctx.message2?.ja4ls != null && ctx.message2.ja4ls.length() > 0" }},
{ "rename": { "field": "message2.ja4t", "target_field": "hash.ja4t", "ignore_missing" : true, "if": "ctx.message2?.ja4t != null && ctx.message2.ja4t.length() > 0" }},
{ "rename": { "field": "message2.ja4ts", "target_field": "hash.ja4ts", "ignore_missing" : true, "if": "ctx.message2?.ja4ts != null && ctx.message2.ja4ts.length() > 0" }},
{ "script": { "lang": "painless", "source": "ctx.network.bytes = (ctx.client.bytes + ctx.server.bytes)", "ignore_failure": true } },
{ "set": { "if": "ctx.connection?.state == 'S0'", "field": "connection.state_description", "value": "Connection attempt seen, no reply" } },
{ "set": { "if": "ctx.connection?.state == 'S1'", "field": "connection.state_description", "value": "Connection established, not terminated" } },
||||
@@ -1,32 +1,227 @@
{
  "description" : "zeek.dns",
  "processors" : [
    { "set": { "field": "event.dataset", "value": "dns" } },
    { "remove": { "field": ["host"], "ignore_failure": true } },
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } },
    { "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } },
    { "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } },
    { "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } },
    { "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } },
    { "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } },
    { "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } },
    { "rename": { "field": "message2.rcode", "target_field": "dns.response.code", "ignore_missing": true } },
    { "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } },
    { "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } },
    { "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } },
    { "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
    { "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
    { "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
    { "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
    { "script": { "lang": "painless", "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", "source": "def ips = []; for (item in ctx.dns.answers.name) { if (item =~ /^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$/ || item =~ /^([a-fA-F0-9:]+:+)+[a-fA-F0-9]+$/) { ips.add(item); } } ctx.dns.resolved_ip = ips;" } },
    { "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
    { "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
    { "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
    { "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } },
    { "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
    { "pipeline": { "name": "zeek.common" } }
  ]
}

{
  "description": "zeek.dns",
  "processors": [
    { "set": { "field": "event.dataset", "value": "dns" } },
    { "remove": { "field": ["host"], "ignore_failure": true } },
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } },
    { "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } },
    { "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } },
    { "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } },
    { "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } },
    { "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } },
    { "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } },
    { "rename": { "field": "message2.rcode", "target_field": "dns.response.code", "ignore_missing": true } },
    { "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } },
    { "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } },
    { "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } },
    { "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
    { "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
    { "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
    { "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
    { "foreach": { "field": "dns.answers.name", "processor": { "pipeline": { "name": "common.ip_validation" } }, "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", "ignore_failure": true } },
    { "foreach": { "field": "temp._valid_ips", "processor": { "append": { "field": "dns.resolved_ip", "allow_duplicates": false, "value": "{{{_ingest._value}}}", "ignore_failure": true } }, "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", "ignore_failure": true } },
    { "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }", "ignore_failure": true } },
    { "remove": { "field": ["temp"], "ignore_missing": true, "ignore_failure": true } },
    { "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
    { "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
    { "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
    { "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } },
    { "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
    { "pipeline": { "name": "zeek.common" } }
  ]
}
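
The answer handling above replaces the old regex script: each entry in dns.answers.name is run through the common.ip_validation sub-pipeline, whatever accumulates in temp._valid_ips is appended to dns.resolved_ip, and the cleanup script then drops null or empty entries. An illustrative example, assuming common.ip_validation emits only literal IPs into temp._valid_ips (implied by this diff but not shown in it): answers of ["example.com", "93.184.216.34"] would leave dns.resolved_ip as ["93.184.216.34"].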
|
||||
@@ -1,20 +0,0 @@
{
  "description" : "zeek.dpd",
  "processors" : [
    { "set": { "field": "event.dataset", "value": "dpd" } },
    { "remove": { "field": ["host"], "ignore_failure": true } },
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
    { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
    { "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
    { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.analyzer", "target_field": "observer.analyzer", "ignore_missing": true } },
    { "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } },
    { "pipeline": { "name": "zeek.common" } }
  ]
}
|
||||
@@ -27,6 +27,7 @@
{ "rename": { "field": "message2.resp_fuids", "target_field": "log.id.resp_fuids", "ignore_missing": true } },
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } },
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } },

@@ -27,6 +27,7 @@
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
{ "rename": { "field": "message2.stream_id", "target_field": "http2.stream_id", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } },
{ "remove": { "field": "message2.tags", "ignore_failure": true } },
{ "remove": { "field": ["host"], "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
|
||||
salt/elasticsearch/files/ingest/zeek.ja4ssh (new file, 10 lines)
@@ -0,0 +1,10 @@
{
  "description": "zeek.ja4ssh",
  "processors": [
    { "set": { "field": "event.dataset", "value": "ja4ssh" } },
    { "remove": { "field": "host", "ignore_missing": true, "ignore_failure": true } },
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.ja4ssh", "target_field": "hash.ja4ssh", "ignore_missing": true, "if": "ctx?.message2?.ja4ssh != null && ctx.message2.ja4ssh.length() > 0" } },
    { "pipeline": { "name": "zeek.common" } }
  ]
}
|
||||
@@ -23,6 +23,8 @@
{ "rename": { "field": "message2.validation_status","target_field": "ssl.validation_status", "ignore_missing": true } },
{ "rename": { "field": "message2.ja3", "target_field": "hash.ja3", "ignore_missing": true } },
{ "rename": { "field": "message2.ja3s", "target_field": "hash.ja3s", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4", "target_field": "hash.ja4", "ignore_missing": true, "if": "ctx?.message2?.ja4 != null && ctx.message2.ja4.length() > 0" } },
{ "rename": { "field": "message2.ja4s", "target_field": "hash.ja4s", "ignore_missing": true, "if": "ctx?.message2?.ja4s != null && ctx.message2.ja4s.length() > 0" } },
{ "foreach":
  {
    "if": "ctx?.tls?.client?.hash?.sha256 !=null",

@@ -42,6 +42,7 @@
{ "dot_expander": { "field": "basic_constraints.path_length", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.basic_constraints.path_length", "target_field": "x509.basic_constraints.path_length", "ignore_missing": true } },
{ "rename": { "field": "message2.fingerprint", "target_field": "hash.sha256", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4x", "target_field": "hash.ja4x", "ignore_missing": true, "if": "ctx?.message2?.ja4x != null && ctx.message2.ja4x.length() > 0" } },
{ "pipeline": { "name": "zeek.common_ssl" } }
]
}
||||
@@ -20,8 +20,28 @@ appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/elasticsearch
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = *.gz
appender.rolling.strategy.action.condition.glob = *.log.gz
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling.strategy.action.condition.nested_condition.age = 7D

appender.rolling_json.type = RollingFile
appender.rolling_json.name = rolling_json
appender.rolling_json.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.json
appender.rolling_json.layout.type = ECSJsonLayout
appender.rolling_json.layout.dataset = elasticsearch.server
appender.rolling_json.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.json.gz
appender.rolling_json.policies.type = Policies
appender.rolling_json.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_json.policies.time.interval = 1
appender.rolling_json.policies.time.modulate = true
appender.rolling_json.strategy.type = DefaultRolloverStrategy
appender.rolling_json.strategy.action.type = Delete
appender.rolling_json.strategy.action.basepath = /var/log/elasticsearch
appender.rolling_json.strategy.action.condition.type = IfFileName
appender.rolling_json.strategy.action.condition.glob = *.json.gz
appender.rolling_json.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling_json.strategy.action.condition.nested_condition.age = 1D

rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_json.ref = rolling_json
||||
@@ -392,6 +392,7 @@ elasticsearch:
    so-logs-elastic_agent_x_metricbeat: *indexSettings
    so-logs-elastic_agent_x_osquerybeat: *indexSettings
    so-logs-elastic_agent_x_packetbeat: *indexSettings
    so-logs-elasticsearch_x_server: *indexSettings
    so-metrics-endpoint_x_metadata: *indexSettings
    so-metrics-endpoint_x_metrics: *indexSettings
    so-metrics-endpoint_x_policy: *indexSettings

@@ -15,7 +15,7 @@
{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}

{# start generation of integration default index_settings #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') %}
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
{% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
{% if check_package_components.size > 1 %}
{% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
||||
salt/elasticsearch/templates/component/ecs/hash.json (new file, 69 lines)
@@ -0,0 +1,69 @@
{
  "template": {
    "mappings": {
      "properties": {
        "hash": {
          "type": "object",
          "properties": {
            "ja3": { "type": "keyword", "ignore_above": 1024 },
            "ja3s": { "type": "keyword", "ignore_above": 1024 },
            "hassh": { "type": "keyword", "ignore_above": 1024 },
            "md5": { "type": "keyword", "ignore_above": 1024 },
            "sha1": { "type": "keyword", "ignore_above": 1024 },
            "sha256": { "type": "keyword", "ignore_above": 1024 },
            "ja4": { "type": "keyword", "ignore_above": 1024 },
            "ja4l": { "type": "keyword", "ignore_above": 1024 },
            "ja4ls": { "type": "keyword", "ignore_above": 1024 },
            "ja4t": { "type": "keyword", "ignore_above": 1024 },
            "ja4ts": { "type": "keyword", "ignore_above": 1024 },
            "ja4ssh": { "type": "keyword", "ignore_above": 1024 },
            "ja4h": { "type": "keyword", "ignore_above": 1024 },
            "ja4x": { "type": "keyword", "ignore_above": 1024 }
          }
        }
      }
    }
  }
}
|
||||
@@ -841,6 +841,10 @@
          "type": "long"
        }
      }
    },
    "capture_file": {
      "type": "keyword",
      "ignore_above": 1024
    }
  }
}
||||
@@ -0,0 +1,43 @@
{
  "template": {
    "mappings": {
      "properties": {
        "agent": {
          "type": "object",
          "properties": {
            "hostname": { "type": "keyword", "ignore_above": 1024 },
            "id": { "type": "keyword", "ignore_above": 1024 },
            "last_checkin_status": { "type": "keyword", "ignore_above": 1024 },
            "last_checkin": { "type": "date" },
            "name": { "type": "keyword", "ignore_above": 1024 },
            "offline_duration_hours": { "type": "integer" },
            "policy_id": { "type": "keyword", "ignore_above": 1024 },
            "status": { "type": "keyword", "ignore_above": 1024 }
          }
        }
      }
    }
  }
}
|
||||
@@ -0,0 +1,104 @@
{
  "template": {
    "mappings": {
      "properties": {
        "@timestamp": { "type": "date" },
        "so_kind": { "type": "keyword", "ignore_above": 1024 },
        "so_operation": { "type": "keyword", "ignore_above": 1024 },
        "so_chat": {
          "properties": {
            "role": { "type": "keyword", "ignore_above": 1024 },
            "content": { "type": "object", "enabled": false },
            "sessionId": { "type": "keyword", "ignore_above": 1024 },
            "createTime": { "type": "date" },
            "deletedAt": { "type": "date" },
            "tags": { "type": "keyword", "ignore_above": 1024 },
            "tool_use_id": { "type": "keyword", "ignore_above": 1024 },
            "userId": { "type": "keyword", "ignore_above": 1024 },
            "message": {
              "properties": {
                "id": { "type": "keyword", "ignore_above": 1024 },
                "type": { "type": "keyword", "ignore_above": 1024 },
                "role": { "type": "keyword", "ignore_above": 1024 },
                "model": { "type": "keyword", "ignore_above": 1024 },
                "contentStr": { "type": "text" },
                "contentBlocks": { "type": "nested", "enabled": false },
                "stopReason": { "type": "keyword", "ignore_above": 1024 },
                "stopSequence": { "type": "keyword", "ignore_above": 1024 },
                "usage": {
                  "properties": {
                    "input_tokens": { "type": "long" },
                    "output_tokens": { "type": "long" },
                    "credits": { "type": "long" }
                  }
                }
              }
            }
          }
        }
      }
    }
  },
  "_meta": {
    "ecs_version": "1.12.2"
  }
}

@@ -0,0 +1,7 @@
{
  "template": {},
  "version": 1,
  "_meta": {
    "description": "default settings for common Security Onion Assistant indices"
  }
}
|
||||
@@ -0,0 +1,44 @@
{
  "template": {
    "mappings": {
      "properties": {
        "@timestamp": { "type": "date" },
        "so_kind": { "type": "keyword", "ignore_above": 1024 },
        "so_session": {
          "properties": {
            "title": { "type": "keyword", "ignore_above": 1024 },
            "sessionId": { "type": "keyword", "ignore_above": 1024 },
            "createTime": { "type": "date" },
            "deleteTime": { "type": "date" },
            "tags": { "type": "keyword", "ignore_above": 1024 },
            "userId": { "type": "keyword", "ignore_above": 1024 }
          }
        }
      }
    }
  },
  "_meta": {
    "ecs_version": "1.12.2"
  }
}

@@ -0,0 +1,7 @@
{
  "template": {},
  "version": 1,
  "_meta": {
    "description": "default settings for common Security Onion Assistant indices"
  }
}
|
||||
salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate (new executable file, 1164 lines)
File diff suppressed because it is too large

@@ -21,7 +21,7 @@ while [[ "$COUNT" -le 240 ]]; do
    ELASTICSEARCH_CONNECTED="yes"
    echo "connected!"
    # Check cluster health once connected
    so-elasticsearch-query _cluster/health?wait_for_status=yellow > /dev/null 2>&1
    so-elasticsearch-query _cluster/health?wait_for_status=yellow\&timeout=120s > /dev/null 2>&1
    break
  else
    ((COUNT+=1))
|
||||
salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot (new file, 195 lines)
@@ -0,0 +1,195 @@
#!/bin/bash

. /usr/sbin/so-common

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
BOLD='\033[1;37m'
NC='\033[0m'

log_title() {
  if [ $1 == "LOG" ]; then
    echo -e "\n${BOLD}================ $2 ================${NC}\n"
  elif [ $1 == "OK" ]; then
    echo -e "${GREEN} $2 ${NC}"
  elif [ $1 == "WARN" ]; then
    echo -e "${YELLOW} $2 ${NC}"
  elif [ $1 == "ERROR" ]; then
    echo -e "${RED} $2 ${NC}"
  fi
}

health_report() {
  if ! health_report_output=$(so-elasticsearch-query _health_report?format=json --fail 2>/dev/null); then
    log_title "ERROR" "Failed to retrieve health report from Elasticsearch"
    return 1
  fi
  non_green_count=$(echo "$health_report_output" | jq '[.indicators | to_entries[] | select(.value.status != "green")] | length')

  if [ "$non_green_count" -gt 0 ]; then
    echo "$health_report_output" | jq -r '.indicators | to_entries[] | select(.value.status != "green") | .key' | while read -r indicator_name; do
      indicator=$(echo "$health_report_output" | jq -r ".indicators.\"$indicator_name\"")
      status=$(echo "$indicator" | jq -r '.status')
      symptom=$(echo "$indicator" | jq -r '.symptom // "No symptom available"')

      # reformat indicator name
      display_name=$(echo "$indicator_name" | tr '_' ' ' | sed 's/\b\(.\)/\u\1/g')

      if [ "$status" = "yellow" ]; then
        log_title "WARN" "$display_name: $symptom"
      else
        log_title "ERROR" "$display_name: $symptom"
      fi

      # diagnosis if available
      echo "$indicator" | jq -c '.diagnosis[]? // empty' | while read -r diagnosis; do
        cause=$(echo "$diagnosis" | jq -r '.cause // "Unknown"')
        action=$(echo "$diagnosis" | jq -r '.action // "No action specified"')

        echo -e " ${BOLD}Cause:${NC} $cause\n"
        echo -e " ${BOLD}Action:${NC} $action\n"

        # Check for affected indices
        affected_indices=$(echo "$diagnosis" | jq -r '.affected_resources.indices[]? // empty')
        if [ -n "$affected_indices" ]; then
          echo -e " ${BOLD}Affected indices:${NC}"
          total_indices=$(echo "$affected_indices" | wc -l)
          echo "$affected_indices" | head -10 | while read -r index; do
            echo " - $index"
          done
          if [ "$total_indices" -gt 10 ]; then
            remaining=$((total_indices - 10))
            echo " ... and $remaining more indices (truncated for readability)"
          fi
        fi
        echo
      done
    done
  else
    log_title "OK" "All health indicators are green"
  fi
}

elasticsearch_status() {
  log_title "LOG" "Elasticsearch Status"
  if so-elasticsearch-query / --fail --output /dev/null; then
    health_report
  else
    log_title "ERROR" "Elasticsearch API is not accessible"
    so-status
    log_title "ERROR" "Make sure Elasticsearch is running. Additionally, check for startup errors in /opt/so/log/elasticsearch/securityonion.log${NC}\n"

    exit 1
  fi
}

indices_by_age() {
  log_title "LOG" "Indices by Creation Date - Size > 1KB"
  log_title "WARN" "Since high/flood watermark has been reached consider updating ILM policies.\n"
  if ! indices_output=$(so-elasticsearch-query '_cat/indices?v&s=creation.date:asc&h=creation.date.string,index,status,health,docs.count,pri.store.size&bytes=b&format=json' --fail 2>/dev/null); then
    log_title "ERROR" "Failed to retrieve indices list from Elasticsearch"
    return 1
  fi

  # Filter for indices with size > 1KB (1024 bytes) and format output
  echo -e "${BOLD}Creation Date Name Size${NC}"
  echo -e "${BOLD}--------------------------------------------------------------------------------------------------------------${NC}"

  # Create list of indices excluding .internal, so-detection*, so-case*
  echo "$indices_output" | jq -r '.[] | select((."pri.store.size" | tonumber) > 1024) | select(.index | (startswith(".internal") or startswith("so-detection") or startswith("so-case")) | not ) | "\(."creation.date.string") | \(.index) | \(."pri.store.size")"' | while IFS='|' read -r creation_date index_name size_bytes; do
    # Convert bytes to GB / MB
    if [ "$size_bytes" -gt 1073741824 ]; then
      size_human=$(echo "scale=2; $size_bytes / 1073741824" | bc)GB
    else
      size_human=$(echo "scale=2; $size_bytes / 1048576" | bc)MB
    fi

    creation_date=$(date -d "$creation_date" '+%Y-%m-%dT%H:%MZ' )

    # Format output with spacing
    printf "%-19s %-76s %10s\n" "$creation_date" "$index_name" "$size_human"
  done
}

watermark_settings() {
  watermark_path=".defaults.cluster.routing.allocation.disk.watermark"
  if ! watermark_output=$(so-elasticsearch-query _cluster/settings?include_defaults=true\&filter_path=*.cluster.routing.allocation.disk.* --fail 2>/dev/null); then
    log_title "ERROR" "Failed to retrieve watermark settings from Elasticsearch"
    return 1
  fi

  if ! disk_allocation_output=$(so-elasticsearch-query _cat/nodes?v\&h=name,ip,disk.used_percent,disk.avail,disk.total,node.role\&format=json --fail 2>/dev/null); then
    log_title "ERROR" "Failed to retrieve disk allocation data from Elasticsearch"
    return 1
  fi

  flood=$(echo $watermark_output | jq -r "$watermark_path.flood_stage" )
  high=$(echo $watermark_output | jq -r "$watermark_path.high" )
  low=$(echo $watermark_output | jq -r "$watermark_path.low" )

  # Strip percentage signs for comparison
  flood_num=${flood%\%}
  high_num=${high%\%}
  low_num=${low%\%}

  # Check each node's disk usage
  log_title "LOG" "Disk Usage Check"
  echo -e "${BOLD}LOW:${GREEN}$low${NC}${BOLD} HIGH:${YELLOW}${high}${NC}${BOLD} FLOOD:${RED}${flood}${NC}\n"

  # Only show data nodes (d=data, h=hot, w=warm, c=cold, f=frozen, s=content)
  echo "$disk_allocation_output" | jq -r '.[] | select(.["node.role"] | test("[dhwcfs]")) | "\(.name)|\(.["disk.used_percent"])"' | while IFS='|' read -r node_name disk_used; do
    disk_used_num=$(echo $disk_used | bc)

    if (( $(echo "$disk_used_num >= $flood_num" | bc -l) )); then
      log_title "ERROR" "$node_name is at or above the flood watermark ($flood)! Disk usage: ${disk_used}%"
      touch /tmp/watermark_reached
    elif (( $(echo "$disk_used_num >= $high_num" | bc -l) )); then
      log_title "ERROR" "$node_name is at or above the high watermark ($high)! Disk usage: ${disk_used}%"
      touch /tmp/watermark_reached
    else
      log_title "OK" "$node_name disk usage: ${disk_used}%"
    fi
  done

  # Check if we need to show indices by age
  if [ -f /tmp/watermark_reached ]; then
    indices_by_age
    rm -f /tmp/watermark_reached
  fi
}

unassigned_shards() {
  if ! unassigned_shards_output=$(so-elasticsearch-query _cat/shards?v\&h=index,shard,prirep,state,unassigned.reason,unassigned.details\&s=state\&format=json --fail 2>/dev/null); then
    log_title "ERROR" "Failed to retrieve shard data from Elasticsearch"
    return 1
  fi

  log_title "LOG" "Unassigned Shards Check"
  # Check if there are any UNASSIGNED shards
  unassigned_count=$(echo "$unassigned_shards_output" | jq '[.[] | select(.state == "UNASSIGNED")] | length')

  if [ "$unassigned_count" -gt 0 ]; then
    echo "$unassigned_shards_output" | jq -r '.[] | select(.state == "UNASSIGNED") | "\(.index)|\(.shard)|\(.prirep)|\(."unassigned.reason")"' | while IFS='|' read -r index shard prirep reason; do
      if [ "$prirep" = "r" ]; then
        log_title "WARN" "Replica shard for index $index is unassigned. Reason: $reason"
      elif [ "$prirep" = "p" ]; then
        log_title "ERROR" "Primary shard for index $index is unassigned. Reason: $reason"
      fi
    done
  else
    log_title "OK" "All shards are assigned"
  fi
}

main() {
  elasticsearch_status
  watermark_settings
  unassigned_shards
}

main
@@ -136,7 +136,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
      TEMPLATE=${i::-14}
      COMPONENT_PATTERN=${TEMPLATE:3}
      MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
      if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ logs-http_endpoint\.generic|logs-winlog\.winlog ]]; then
      if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then
        load_failures=$((load_failures+1))
        echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
      else

@@ -21,7 +21,7 @@
  'so-strelka-filestream'
] %}

{% elif GLOBALS.role == 'so-manager' or GLOBALS.role == 'so-standalone' or GLOBALS.role == 'so-managersearch' %}
{% elif GLOBALS.role in ['so-manager', 'so-standalone','so-managersearch', 'so-managerhype'] %}
{% set NODE_CONTAINERS = [
  'so-dockerregistry',
  'so-elasticsearch',
|
||||
@@ -14,11 +14,13 @@ firewall:
|
||||
external_kafka: []
|
||||
fleet: []
|
||||
heavynode: []
|
||||
hypervisor: []
|
||||
idh: []
|
||||
import: []
|
||||
localhost:
|
||||
- 127.0.0.1
|
||||
manager: []
|
||||
managerhype: []
|
||||
managersearch: []
|
||||
receiver: []
|
||||
searchnode: []
|
||||
@@ -489,6 +491,15 @@ firewall:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
@@ -541,6 +552,218 @@ firewall:
|
||||
desktop:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
syslog:
|
||||
portgroups:
|
||||
- syslog
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
portgroups: []
|
||||
customhostgroup2:
|
||||
portgroups: []
|
||||
customhostgroup3:
|
||||
portgroups: []
|
||||
customhostgroup4:
|
||||
portgroups: []
|
||||
customhostgroup5:
|
||||
portgroups: []
|
||||
customhostgroup6:
|
||||
portgroups: []
|
||||
customhostgroup7:
|
||||
portgroups: []
|
||||
customhostgroup8:
|
||||
portgroups: []
|
||||
customhostgroup9:
|
||||
portgroups: []
|
||||
managerhype:
|
||||
chain:
|
||||
DOCKER-USER:
|
||||
hostgroups:
|
||||
managerhype:
|
||||
portgroups:
|
||||
- kibana
|
||||
- redis
|
||||
- influxdb
|
||||
- elasticsearch_rest
|
||||
- elasticsearch_node
|
||||
- docker_registry
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- localrules
|
||||
- sensoroni
|
||||
fleet:
|
||||
portgroups:
|
||||
- elasticsearch_rest
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- beats_5056
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
idh:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
sensor:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
- beats_5644
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
searchnode:
|
||||
portgroups:
|
||||
- redis
|
||||
- elasticsearch_rest
|
||||
- elasticsearch_node
|
||||
- beats_5644
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
heavynode:
|
||||
portgroups:
|
||||
- redis
|
||||
- elasticsearch_rest
|
||||
- elasticsearch_node
|
||||
- beats_5644
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
receiver:
|
||||
portgroups:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
analyst:
|
||||
portgroups:
|
||||
- nginx
|
||||
beats_endpoint:
|
||||
portgroups:
|
||||
- beats_5044
|
||||
beats_endpoint_ssl:
|
||||
portgroups:
|
||||
- beats_5644
|
||||
elasticsearch_rest:
|
||||
portgroups:
|
||||
- elasticsearch_rest
|
||||
elastic_agent_endpoint:
|
||||
portgroups:
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
endgame:
|
||||
portgroups:
|
||||
- endgame
|
||||
external_suricata:
|
||||
portgroups:
|
||||
- external_suricata
|
||||
desktop:
|
||||
portgroups:
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- sensoroni
|
||||
- yum
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- yum
|
||||
- docker_registry
|
||||
- influxdb
|
||||
- elastic_agent_control
|
||||
- elastic_agent_data
|
||||
- elastic_agent_update
|
||||
- sensoroni
|
||||
customhostgroup0:
|
||||
portgroups: []
|
||||
customhostgroup1:
|
||||
portgroups: []
|
||||
customhostgroup2:
|
||||
portgroups: []
|
||||
customhostgroup3:
|
||||
portgroups: []
|
||||
customhostgroup4:
|
||||
portgroups: []
|
||||
customhostgroup5:
|
||||
portgroups: []
|
||||
customhostgroup6:
|
||||
portgroups: []
|
||||
customhostgroup7:
|
||||
portgroups: []
|
||||
customhostgroup8:
|
||||
portgroups: []
|
||||
customhostgroup9:
|
||||
portgroups: []
|
||||
INPUT:
|
||||
hostgroups:
|
||||
anywhere:
|
||||
portgroups:
|
||||
- ssh
|
||||
dockernet:
|
||||
portgroups:
|
||||
- all
|
||||
fleet:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
idh:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
localhost:
|
||||
portgroups:
|
||||
- all
|
||||
sensor:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
searchnode:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
heavynode:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
receiver:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
desktop:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
hypervisor:
|
||||
portgroups:
|
||||
- salt_manager
|
||||
self:
|
||||
portgroups:
|
||||
- syslog
|
||||
@@ -686,6 +909,15 @@ firewall:
             - elastic_agent_control
             - elastic_agent_data
             - elastic_agent_update
+            hypervisor:
+              portgroups:
+                - yum
+                - docker_registry
+                - influxdb
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+                - sensoroni
             customhostgroup0:
               portgroups: []
             customhostgroup1:
@@ -738,6 +970,9 @@ firewall:
             desktop:
               portgroups:
                 - salt_manager
+            hypervisor:
+              portgroups:
+                - salt_manager
             self:
               portgroups:
                 - syslog
@@ -890,6 +1125,15 @@ firewall:
             - elastic_agent_control
             - elastic_agent_data
             - elastic_agent_update
+            hypervisor:
+              portgroups:
+                - yum
+                - docker_registry
+                - influxdb
+                - elastic_agent_control
+                - elastic_agent_data
+                - elastic_agent_update
+                - sensoroni
             customhostgroup0:
               portgroups: []
             customhostgroup1:
@@ -945,6 +1189,9 @@ firewall:
             desktop:
               portgroups:
                 - salt_manager
+            hypervisor:
+              portgroups:
+                - salt_manager
             self:
               portgroups:
                 - syslog
@@ -983,6 +1230,10 @@ firewall:
               portgroups:
                 - elasticsearch_node
                 - elasticsearch_rest
+            managerhype:
+              portgroups:
+                - elasticsearch_node
+                - elasticsearch_rest
             standalone:
               portgroups:
                 - elasticsearch_node
@@ -1130,6 +1381,10 @@ firewall:
               portgroups:
                 - elasticsearch_node
                 - elasticsearch_rest
+            managerhype:
+              portgroups:
+                - elasticsearch_node
+                - elasticsearch_rest
             standalone:
               portgroups:
                 - elasticsearch_node
@@ -1332,6 +1587,9 @@ firewall:
               portgroups:
                 - redis
                 - elastic_agent_data
+            managerhype:
+              portgroups:
+                - elastic_agent_data
             self:
               portgroups:
                 - redis
@@ -1449,6 +1707,9 @@ firewall:
             managersearch:
               portgroups:
                 - openssh
+            managerhype:
+              portgroups:
+                - openssh
             standalone:
               portgroups:
                 - openssh
@@ -1472,3 +1733,66 @@ firewall:
               portgroups: []
             customhostgroup9:
               portgroups: []
+    hypervisor:
+      chain:
+        DOCKER-USER:
+          hostgroups:
+            customhostgroup0:
+              portgroups: []
+            customhostgroup1:
+              portgroups: []
+            customhostgroup2:
+              portgroups: []
+            customhostgroup3:
+              portgroups: []
+            customhostgroup4:
+              portgroups: []
+            customhostgroup5:
+              portgroups: []
+            customhostgroup6:
+              portgroups: []
+            customhostgroup7:
+              portgroups: []
+            customhostgroup8:
+              portgroups: []
+            customhostgroup9:
+              portgroups: []
+        INPUT:
+          hostgroups:
+            anywhere:
+              portgroups:
+                - ssh
+            dockernet:
+              portgroups:
+                - all
+            localhost:
+              portgroups:
+                - all
+            manager:
+              portgroups: []
+            managersearch:
+              portgroups: []
+            managerhype:
+              portgroups: []
+            standalone:
+              portgroups: []
+            customhostgroup0:
+              portgroups: []
+            customhostgroup1:
+              portgroups: []
+            customhostgroup2:
+              portgroups: []
+            customhostgroup3:
+              portgroups: []
+            customhostgroup4:
+              portgroups: []
+            customhostgroup5:
+              portgroups: []
+            customhostgroup6:
+              portgroups: []
+            customhostgroup7:
+              portgroups: []
+            customhostgroup8:
+              portgroups: []
+            customhostgroup9:
+              portgroups: []
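The defaults above are only the baseline: at render time they sit underneath any operator-supplied pillar via salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) (see the Kafka template further down). A minimal sketch of a local pillar override, assuming a hypothetical minion pillar file (the file path and the chosen portgroup are illustrative, not part of this diff):

    # /opt/so/saltstack/local/pillar/minions/<minion>.sls (hypothetical)
    firewall:
      role:
        hypervisor:
          chain:
            INPUT:
              hostgroups:
                customhostgroup0:
                  portgroups:
                    - ssh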
@@ -91,6 +91,10 @@ COMMIT
 -A INPUT -m conntrack --ctstate INVALID -j DROP
 -A INPUT -p icmp -j ACCEPT
 -A INPUT -j LOGGING
+{% if GLOBALS.role in ['so-hypervisor', 'so-managerhype'] -%}
+-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A FORWARD -i br0 -o br0 -j ACCEPT
+{%- endif %}
 -A FORWARD -j DOCKER-USER
 -A FORWARD -j DOCKER-ISOLATION-STAGE-1
 -A FORWARD -o sobridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
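For illustration only: on a node whose GLOBALS.role is so-hypervisor or so-managerhype, the new Jinja block renders the two extra FORWARD rules, so that stretch of the generated ruleset would look roughly like:

    -A INPUT -j LOGGING
    -A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
    -A FORWARD -i br0 -o br0 -j ACCEPT
    -A FORWARD -j DOCKER-USER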
@@ -25,7 +25,7 @@
 {% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
 {% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}
 
-{% if role in ['manager', 'managersearch', 'standalone'] %}
+{% if role.startswith('manager') or role == 'standalone' %}
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
 {% endif %}
@@ -38,8 +38,8 @@
   {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
 {% endif %}
 
-{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
-  {% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
+{% if role.startswith('manager') or role in ['standalone', 'receiver'] %}
+  {% for r in ['manager', 'managersearch', 'managerhype', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
     {% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
       {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
     {% endif %}
@@ -48,11 +48,11 @@
 
 {% if KAFKA_EXTERNAL_ACCESS %}
   {# Kafka external access only applies for Kafka nodes with the broker role. #}
-  {% if role in ['manager', 'managersearch', 'standalone', 'receiver'] and 'broker' in kafka_node_type %}
+  {% if role.startswith('manager') or role in ['standalone', 'receiver'] and 'broker' in kafka_node_type %}
     {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
   {% endif %}
 {% endif %}
 
 {% endif %}
 
 {% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
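One thing worth noting about the last rewritten condition: in Jinja, and binds tighter than or, so role.startswith('manager') or role in ['standalone', 'receiver'] and 'broker' in kafka_node_type parses as startswith(...) or (in-list and broker-check), meaning manager-class roles pass regardless of the broker check. If the broker check is intended to gate manager roles too, an explicitly grouped form (shown as a sketch, not what this commit does) would be:

    {% if (role.startswith('manager') or role in ['standalone', 'receiver']) and 'broker' in kafka_node_type %}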
@@ -36,6 +36,7 @@ firewall:
     external_kafka: *hostgroupsettings
     fleet: *hostgroupsettings
     heavynode: *hostgroupsettings
+    hypervisor: *hostgroupsettings
     idh: *hostgroupsettings
     import: *hostgroupsettings
     localhost: *ROhostgroupsettingsadv
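The *hostgroupsettings and *ROhostgroupsettingsadv aliases refer to YAML anchors (&hostgroupsettings, &ROhostgroupsettingsadv) declared earlier in the annotation file, outside this hunk. A minimal sketch of the anchor/alias pattern with placeholder keys (the real anchor bodies are not shown in this diff):

    # anchor declared once (keys here are placeholders)
    defaults: &hostgroupsettings
      readonly: False
      advanced: False
    # each alias below reuses the same mapping
    hypervisor: *hostgroupsettings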
125
salt/hypervisor/defaults.yaml
Normal file
@@ -0,0 +1,125 @@
hypervisor:
  model:
    testModel:
      hardware:
        cpu: 128
        memory: 128
        disk:
          1: pci_0000_c7_00_0
          2: pci_0000_c8_00_0
        copper:
          1: pci_0000_c4_00_0
          2: pci_0000_c4_00_1
          3: pci_0000_c4_00_2
          4: pci_0000_c4_00_3
        sfp:
          5: pci_0000_02_00_0
          6: pci_0000_02_00_1
          7: pci_0000_41_00_0
          8: pci_0000_41_00_1
    SOSSNNV:
      hardware:
        cpu: 128
        memory: 256
        disk:
          1: pci_0000_42_00_0
          2: pci_0000_43_00_0
          3: pci_0000_44_00_0
          4: pci_0000_45_00_0
        copper:
        sfp:
          1: pci_0000_02_00_0
          2: pci_0000_02_00_1
          3: pci_0000_41_00_0
          4: pci_0000_41_00_1
    SOSSNNV-DE02:
      hardware:
        cpu: 128
        memory: 384
        disk:
          1: pci_0000_41_00_0
          2: pci_0000_42_00_0
          3: pci_0000_81_00_0
          4: pci_0000_82_00_0
          5: pci_0000_83_00_0
          6: pci_0000_84_00_0
        copper:
          1: pci_0000_85_00_0
          2: pci_0000_85_00_1
          3: pci_0000_85_00_2
          4: pci_0000_85_00_3
        sfp:
          5: pci_0000_c4_00_0
          6: pci_0000_c4_00_1
          7: pci_0000_c5_00_0
          8: pci_0000_c5_00_1
          9: pci_0000_c5_00_2
          10: pci_0000_c5_00_3
    SOSSN7200:
      hardware:
        cpu: 128
        memory: 256
        copper:
          1: pci_0000_03_00_0
          2: pci_0000_03_00_1
          3: pci_0000_03_00_2
          4: pci_0000_03_00_3
        sfp:
          5: pci_0000_02_00_0
          6: pci_0000_02_00_1
          7: pci_0000_81_00_0
          8: pci_0000_81_00_1
          9: pci_0000_81_00_2
          10: pci_0000_81_00_3
    SOSSN7200-DE02:
      hardware:
        cpu: 128
        memory: 384
        copper:
          1: pci_0000_82_00_0
          2: pci_0000_82_00_1
          3: pci_0000_82_00_2
          4: pci_0000_82_00_3
        sfp:
          5: pci_0000_c4_00_0
          6: pci_0000_c4_00_1
          7: pci_0000_c5_00_0
          8: pci_0000_c5_00_1
          9: pci_0000_c6_00_0
          10: pci_0000_c6_00_1
          11: pci_0000_c6_00_2
          12: pci_0000_c6_00_3
    SOS4000:
      hardware:
        cpu: 128
        memory: 256
        copper:
          1: pci_0000_03_00_0
          2: pci_0000_03_00_1
          3: pci_0000_03_00_2
          4: pci_0000_03_00_3
        sfp:
          5: pci_0000_02_00_0
          6: pci_0000_02_00_1
          7: pci_0000_81_00_0
          8: pci_0000_81_00_1
          9: pci_0000_81_00_2
          10: pci_0000_81_00_3
    SOS5000-DE02:
      hardware:
        cpu: 128
        memory: 384
        copper:
          1: pci_0000_82_00_0
          2: pci_0000_82_00_1
          3: pci_0000_82_00_2
          4: pci_0000_82_00_3
        sfp:
          5: pci_0000_c4_00_0
          6: pci_0000_c4_00_1
          7: pci_0000_c5_00_0
          8: pci_0000_c5_00_1
          9: pci_0000_c6_00_0
          10: pci_0000_c6_00_1
          11: pci_0000_c6_00_2
          12: pci_0000_c6_00_3
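These model names are matched later in map.jinja against the minion's sosmodel (or byodmodel) grain. A minimal sketch of the lookup, assuming a hypothetical appliance that reports sosmodel SOS4000:

    {% set model_config = HYPERVISORMERGED.model.get('SOS4000', {}) %}
    {% set hw = model_config.get('hardware', {}) %}  {# hw.cpu -> 128, hw.memory -> 256 #}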
1
salt/hypervisor/hosts/README
Normal file
@@ -0,0 +1 @@
This directory will contain hypervisor hosts. We need this README in place to ensure the /opt/so/saltstack/local/salt/hypervisor/hosts directory gets created during setup.
49
salt/hypervisor/init.sls
Normal file
@@ -0,0 +1,49 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
#   "You may not move, change, disable, or circumvent the license key functionality
#   in the software, and you may not remove or obscure any functionality in the
#   software that is protected by the license key."

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% if 'vrt' in salt['pillar.get']('features', []) %}

hypervisor_log_dir:
  file.directory:
    - name: /opt/so/log/hypervisor

hypervisor_sbin:
  file.recurse:
    - name: /usr/sbin
    - source: salt://hypervisor/tools/sbin
    - file_mode: 744

hypervisor_sbin_jinja:
  file.recurse:
    - name: /usr/sbin
    - source: salt://hypervisor/tools/sbin_jinja
    - template: jinja
    - file_mode: 744

{% else %}

{{sls}}_no_license_detected:
  test.fail_without_changes:
    - name: {{sls}}_no_license_detected
    - comment:
      - "Hypervisor nodes are a feature supported only for customers with a valid license.
        Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
        for more information about purchasing a license to enable this feature."

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
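The whole state is gated on the 'vrt' feature flag checked above. A minimal sketch of enabling and applying it (the pillar key name comes from the state itself; the pillar placement and minion target glob are illustrative assumptions):

    # pillar snippet (placement illustrative)
    features:
      - vrt

    # then, from the manager (target glob illustrative):
    sudo salt 'hypervisor*' state.apply hypervisor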
182
salt/hypervisor/map.jinja
Normal file
@@ -0,0 +1,182 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0.

   Note: Per the Elastic License 2.0, the second limitation states:

   "You may not move, change, disable, or circumvent the license key functionality
   in the software, and you may not remove or obscure any functionality in the
   software that is protected by the license key." #}

{% if 'vrt' in salt['pillar.get']('features', []) %}

  {# Import defaults.yaml for model hardware capabilities #}
  {% import_yaml 'hypervisor/defaults.yaml' as DEFAULTS %}
  {% set HYPERVISORMERGED = salt['pillar.get']('hypervisor', default=DEFAULTS.hypervisor, merge=True) %}

  {# Get hypervisor nodes from pillar #}
  {% set NODES = salt['pillar.get']('hypervisor:nodes', {}) %}

  {# Build enhanced HYPERVISORS structure #}
  {% set HYPERVISORS = {} %}
  {% do salt.log.debug('salt/hypervisor/map.jinja: NODES content: ' ~ NODES | tojson) %}
  {% for role, hypervisors in NODES.items() %}
    {% do salt.log.debug('salt/hypervisor/map.jinja: Processing role: ' ~ role) %}
    {% do HYPERVISORS.update({role: {}}) %}
    {% for hypervisor, config in hypervisors.items() %}
      {% do salt.log.debug('salt/hypervisor/map.jinja: Processing hypervisor: ' ~ hypervisor ~ ' with config: ' ~ config | tojson) %}
      {# Get model from cached grains using Salt runner #}
      {% set grains = salt.saltutil.runner('cache.grains', tgt=hypervisor ~ '_*', tgt_type='glob') %}
      {% set model = '' %}
      {% if grains %}
        {% set minion_id = grains.keys() | first %}
        {% set model = grains[minion_id].get('sosmodel', grains[minion_id].get('byodmodel', '')) %}
      {% endif %}

      {% set model_config = HYPERVISORMERGED.model.get(model, {}) %}

      {# Get VM list from VMs file #}
      {% set vms = {} %}
      {% set vm_list = [] %}
      {% set vm_list_file = 'hypervisor/hosts/' ~ hypervisor ~ 'VMs' %}
      {% do salt.log.debug('salt/hypervisor/map.jinja: VM list file: ' ~ vm_list_file) %}
      {% if salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_list_file) %}
        {% import_json vm_list_file as vm_list %}
      {% endif %}
      {% if vm_list %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: VM list content: ' ~ vm_list | tojson) %}
      {% else %}
        {# we won't get here if the vm_list_file doesn't exist because we will get TemplateNotFound on the import_json #}
        {% do salt.log.debug('salt/hypervisor/map.jinja: VM list empty: ' ~ vm_list_file) %}
      {% endif %}

      {# Load status and configuration for each VM #}
      {% for vm in vm_list %}
        {# Get VM details from list entry #}
        {% set hostname = vm.get('hostname', '') %}
        {% set role = vm.get('role', '') %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: Processing VM - hostname: ' ~ hostname ~ ', role: ' ~ role) %}

        {# Try to load VM configuration from config file first, then .error file if config doesn't exist #}
        {% set vm_file = 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ hostname ~ '_' ~ role %}
        {% set vm_error_file = vm_file ~ '.error' %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: VM config file: ' ~ vm_file) %}

        {# Check if base config file exists #}
        {% set config_exists = salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_file) %}
        {% set error_exists = salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_error_file) %}

        {% set vm_state = none %}
        {% if config_exists %}
          {% import_json vm_file as vm_state %}
          {% do salt.log.debug('salt/hypervisor/map.jinja: Loaded VM config from base file') %}
        {% elif error_exists %}
          {% import_json vm_error_file as vm_state %}
          {% do salt.log.debug('salt/hypervisor/map.jinja: Loaded VM config from .error file') %}
        {% else %}
          {% do salt.log.warning('salt/hypervisor/map.jinja: No config or error file found for VM ' ~ hostname ~ '_' ~ role) %}
        {% endif %}

        {% if vm_state %}
          {% do salt.log.debug('salt/hypervisor/map.jinja: VM config content: ' ~ vm_state | tojson) %}
          {% set vm_data = {'config': vm_state.config} %}

          {# Load VM status from status file #}
          {% set status_file = vm_file ~ '.status' %}
          {% do salt.log.debug('salt/hypervisor/map.jinja: VM status file: ' ~ status_file) %}
          {% import_json status_file as status_data %}
          {% if status_data %}
            {% do salt.log.debug('salt/hypervisor/map.jinja: VM status content: ' ~ status_data | tojson) %}
            {% do vm_data.update({'status': status_data}) %}
          {% else %}
            {% do salt.log.debug('salt/hypervisor/map.jinja: Status file empty: ' ~ status_file) %}
            {% do vm_data.update({
              'status': {
                'status': '',
                'details': none,
                'timestamp': ''
              }
            }) %}
          {% endif %}
          {% do vms.update({hostname ~ '_' ~ role: vm_data}) %}
        {% else %}
          {% do salt.log.debug('salt/hypervisor/map.jinja: Skipping VM ' ~ hostname ~ '_' ~ role ~ ' - no config available') %}
        {% endif %}
      {% endfor %}

      {# Find and add destroyed VMs from status files #}
      {% set processed_vms = [] %}
      {% for vm_full_name, vm_data in vms.items() %}
        {% do processed_vms.append(vm_full_name) %}
      {% endfor %}

      {# Find all status files for this hypervisor #}
      {% set relative_path = 'hypervisor/hosts/' ~ hypervisor %}
      {% set absolute_path = '/opt/so/saltstack/local/salt/' ~ relative_path %}
      {% do salt.log.debug('salt/hypervisor/map.jinja: Scanning for status files in: ' ~ absolute_path) %}

      {# Try to find status files using file.find with absolute path #}
      {% set status_files = salt['file.find'](absolute_path, name='*_*.status', type='f') %}
      {% do salt.log.debug('salt/hypervisor/map.jinja: Found status files: ' ~ status_files | tojson) %}

      {# Convert absolute paths back to relative paths for processing #}
      {% set relative_status_files = [] %}
      {% for status_file in status_files %}
        {% set relative_file = status_file | replace('/opt/so/saltstack/local/salt/', '') %}
        {% do relative_status_files.append(relative_file) %}
      {% endfor %}
      {% set status_files = relative_status_files %}

      {% do salt.log.debug('salt/hypervisor/map.jinja: Converted to relative paths: ' ~ status_files | tojson) %}

      {% for status_file in status_files %}
        {# Extract the VM name from the filename #}
        {% set basename = status_file.split('/')[-1] %}
        {% set vm_name = basename.replace('.status', '') %}
        {% set hostname = vm_name.split('_')[0] %}

        {# Skip already processed VMs #}
        {% if vm_name in processed_vms %}
          {% continue %}
        {% endif %}

        {# Read the status file #}
        {% do salt.log.debug('salt/hypervisor/map.jinja: Processing potential destroyed VM status file: ' ~ status_file) %}
        {% import_json status_file as status_data %}

        {# Only process files with "Destroyed Instance" status #}
        {% if status_data and status_data.status == 'Destroyed Instance' %}
          {% do salt.log.debug('salt/hypervisor/map.jinja: Found VM with Destroyed Instance status: ' ~ vm_name) %}

          {# Add to vms with minimal config #}
          {% do vms.update({
            vm_name: {
              'status': status_data,
              'config': {}
            }
          }) %}
        {% endif %}
      {% endfor %}

      {# Merge node config with model capabilities and VM states #}
      {% do HYPERVISORS[role].update({
        hypervisor: {
          'config': config,
          'model': model,
          'hardware': model_config.get('hardware', {}),
          'vms': vms
        }
      }) %}
    {% endfor %}
  {% endfor %}

{% else %}

  {% do salt.log.error(
    'Hypervisor nodes are a feature supported only for customers with a valid license. '
    'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com '
    'for more information about purchasing a license to enable this feature.'
  ) %}

{% endif %}
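Putting the loops together, HYPERVISORS ends up shaped roughly as below (hostnames, model, and values are illustrative):

    {'hypervisor':
        {'hv01':
            {'config':   {...},                 # node config from pillar
             'model':    'SOS4000',             # from the cached sosmodel/byodmodel grain
             'hardware': {'cpu': 128, 'memory': 256, 'copper': {...}, 'sfp': {...}},
             'vms':      {'sensor01_sensor': {'config': {...}, 'status': {...}}}}}}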
445
salt/hypervisor/tools/sbin/so-nvme-raid1.sh
Normal file
@@ -0,0 +1,445 @@
#!/bin/bash

#################################################################
# RAID-1 Setup Script for NVMe Drives
#################################################################
#
# DESCRIPTION:
# This script automatically sets up a RAID-1 (mirrored) array using two NVMe drives
# (/dev/nvme0n1 and /dev/nvme1n1) and mounts it at /nsm with an XFS filesystem.
#
# FUNCTIONALITY:
# - Detects and reports existing RAID configurations
# - Thoroughly cleans target drives of any existing data/configurations
# - Creates GPT partition tables with RAID-type partitions
# - Establishes RAID-1 array (${RAID_DEVICE}) for data redundancy
# - Formats the array with XFS filesystem for performance
# - Automatically mounts at /nsm and configures for boot persistence
# - Provides monitoring information for resync operations
#
# SAFETY FEATURES:
# - Requires root privileges
# - Exits gracefully if RAID already exists and is mounted
# - Performs comprehensive cleanup to avoid conflicts
# - Forces partition table updates and waits for system recognition
#
# PREREQUISITES:
# - Two NVMe drives: /dev/nvme0n1 and /dev/nvme1n1
# - Root access
# - mdadm, sgdisk, and standard Linux utilities
#
# WARNING: This script will DESTROY all data on the target drives!
#
# USAGE:
#   sudo ./so-nvme-raid1.sh                   # Normal operation
#   sudo ./so-nvme-raid1.sh --force-cleanup   # Force cleanup of existing RAID
#
#################################################################

# Exit on any error
set -e

# Configuration variables
RAID_ARRAY_NAME="md0"
RAID_DEVICE="/dev/${RAID_ARRAY_NAME}"
MOUNT_POINT="/nsm"
FORCE_CLEANUP=false

# Parse command line arguments
for arg in "$@"; do
    case $arg in
        --force-cleanup)
            FORCE_CLEANUP=true
            shift
            ;;
        *)
            ;;
    esac
done

# Function to log messages
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Function to check if running as root
check_root() {
    if [ "$EUID" -ne 0 ]; then
        log "Error: Please run as root"
        exit 1
    fi
}

# Function to force cleanup all RAID components
force_cleanup_raid() {
    log "=== FORCE CLEANUP MODE ==="
    log "This will destroy all RAID configurations and data on target drives!"

    # Stop all MD arrays
    log "Stopping all MD arrays"
    mdadm --stop --scan 2>/dev/null || true

    # Wait for arrays to stop
    sleep 2

    # Remove any running md devices
    for md in /dev/md*; do
        if [ -b "$md" ]; then
            log "Stopping $md"
            mdadm --stop "$md" 2>/dev/null || true
        fi
    done

    # Force cleanup both NVMe drives
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        log "Force cleaning $device"

        # Kill any processes using the device
        fuser -k "${device}"* 2>/dev/null || true

        # Unmount any mounted partitions
        for part in "${device}"*; do
            if [ -b "$part" ]; then
                umount -f "$part" 2>/dev/null || true
            fi
        done

        # Force zero RAID superblocks on partitions
        for part in "${device}"p*; do
            if [ -b "$part" ]; then
                log "Zeroing RAID superblock on $part"
                mdadm --zero-superblock --force "$part" 2>/dev/null || true
            fi
        done

        # Zero superblock on the device itself
        log "Zeroing RAID superblock on $device"
        mdadm --zero-superblock --force "$device" 2>/dev/null || true

        # Remove LVM physical volumes
        pvremove -ff -y "$device" 2>/dev/null || true

        # Wipe all filesystem and partition signatures
        log "Wiping all signatures from $device"
        wipefs -af "$device" 2>/dev/null || true

        # Overwrite the beginning of the drive (partition table area)
        log "Clearing partition table on $device"
        dd if=/dev/zero of="$device" bs=1M count=10 2>/dev/null || true

        # Clear the end of the drive (backup partition table area)
        local device_size=$(blockdev --getsz "$device" 2>/dev/null || echo "0")
        if [ "$device_size" -gt 0 ]; then
            dd if=/dev/zero of="$device" bs=512 seek=$(( device_size - 2048 )) count=2048 2>/dev/null || true
        fi

        # Force kernel to re-read partition table
        blockdev --rereadpt "$device" 2>/dev/null || true
        partprobe -s "$device" 2>/dev/null || true
    done

    # Clear mdadm configuration
    log "Clearing mdadm configuration"
    echo "DEVICE partitions" > /etc/mdadm.conf

    # Remove any fstab entries for the RAID device or mount point
    log "Cleaning fstab entries"
    sed -i "\|${RAID_DEVICE}|d" /etc/fstab
    sed -i "\|${MOUNT_POINT}|d" /etc/fstab

    # Wait for system to settle
    udevadm settle
    sleep 5

    log "Force cleanup complete!"
    log "Proceeding with RAID setup..."
}

# Function to find MD arrays using specific devices
find_md_arrays_using_devices() {
    local target_devices=("$@")
    local found_arrays=()

    # Parse /proc/mdstat to find arrays using our target devices
    if [ -f "/proc/mdstat" ]; then
        while IFS= read -r line; do
            if [[ $line =~ ^(md[0-9]+) ]]; then
                local array_name="${BASH_REMATCH[1]}"
                local array_path="/dev/$array_name"

                # Check if this array uses any of our target devices
                for device in "${target_devices[@]}"; do
                    if echo "$line" | grep -q "${device##*/}"; then
                        found_arrays+=("$array_path")
                        break
                    fi
                done
            fi
        done < /proc/mdstat
    fi

    printf '%s\n' "${found_arrays[@]}"
}

# Function to check if RAID is already set up
check_existing_raid() {
    local target_devices=("/dev/nvme0n1p1" "/dev/nvme1n1p1")
    local found_arrays=($(find_md_arrays_using_devices "${target_devices[@]}"))

    # Check if we found any arrays using our target devices
    if [ ${#found_arrays[@]} -gt 0 ]; then
        for array_path in "${found_arrays[@]}"; do
            if mdadm --detail "$array_path" &>/dev/null; then
                local raid_state=$(mdadm --detail "$array_path" | grep "State" | awk '{print $3}')
                local mount_point="/nsm"

                log "Found existing RAID array $array_path (State: $raid_state)"

                # Check what's currently mounted at /nsm
                local current_mount=$(findmnt -n -o SOURCE "$mount_point" 2>/dev/null || echo "")

                if [ -n "$current_mount" ]; then
                    if [ "$current_mount" = "$array_path" ]; then
                        log "RAID array $array_path is already correctly mounted at $mount_point"
                        log "Current RAID details:"
                        mdadm --detail "$array_path"

                        # Check if resyncing
                        if grep -q "resync" /proc/mdstat; then
                            log "RAID is currently resyncing:"
                            grep resync /proc/mdstat
                            log "You can monitor progress with: watch -n 60 cat /proc/mdstat"
                        else
                            log "RAID is fully synced and operational"
                        fi

                        # Show disk usage
                        log "Current disk usage:"
                        df -h "$mount_point"

                        exit 0
                    else
                        log "Found $mount_point mounted on $current_mount, but RAID array $array_path exists"
                        log "Will unmount current filesystem and remount on RAID array"

                        # Unmount current filesystem
                        log "Unmounting $mount_point"
                        umount "$mount_point"

                        # Remove old fstab entry
                        log "Removing old fstab entry for $current_mount"
                        sed -i "\|$current_mount|d" /etc/fstab

                        # Mount the RAID array
                        log "Mounting RAID array $array_path at $mount_point"
                        mount "$array_path" "$mount_point"

                        # Update fstab
                        log "Updating fstab for RAID array"
                        sed -i "\|${array_path}|d" /etc/fstab
                        echo "${array_path} ${mount_point} xfs defaults,nofail 0 0" >> /etc/fstab

                        log "RAID array is now mounted at $mount_point"
                        log "Current RAID details:"
                        mdadm --detail "$array_path"

                        # Check if resyncing
                        if grep -q "resync" /proc/mdstat; then
                            log "RAID is currently resyncing:"
                            grep resync /proc/mdstat
                            log "You can monitor progress with: watch -n 60 cat /proc/mdstat"
                        else
                            log "RAID is fully synced and operational"
                        fi

                        # Show disk usage
                        log "Current disk usage:"
                        df -h "$mount_point"

                        exit 0
                    fi
                else
                    # /nsm not mounted, mount the RAID array
                    log "Mounting RAID array $array_path at $mount_point"
                    mount "$array_path" "$mount_point"

                    # Update fstab
                    log "Updating fstab for RAID array"
                    sed -i "\|${array_path}|d" /etc/fstab
                    echo "${array_path} ${mount_point} xfs defaults,nofail 0 0" >> /etc/fstab

                    log "RAID array is now mounted at $mount_point"
                    log "Current RAID details:"
                    mdadm --detail "$array_path"

                    # Show disk usage
                    log "Current disk usage:"
                    df -h "$mount_point"

                    exit 0
                fi
            fi
        done
    fi

    # Check if any of the target devices are in use
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        if mdadm --examine "$device" &>/dev/null || mdadm --examine "${device}p1" &>/dev/null; then
            # Find the actual array name for this device
            local device_arrays=($(find_md_arrays_using_devices "${device}p1"))
            local array_name=""

            if [ ${#device_arrays[@]} -gt 0 ]; then
                array_name="${device_arrays[0]}"
            else
                # Fallback: try to find array name from /proc/mdstat
                local partition_name="${device##*/}p1"
                array_name=$(grep -l "$partition_name" /proc/mdstat 2>/dev/null | head -1)
                if [ -n "$array_name" ]; then
                    array_name=$(grep "^md[0-9]" /proc/mdstat | grep "$partition_name" | awk '{print "/dev/" $1}' | head -1)
                fi
                # Final fallback
                if [ -z "$array_name" ]; then
                    array_name="$RAID_DEVICE"
                fi
            fi

            log "Error: $device appears to be part of an existing RAID array"
            log "Old RAID metadata detected but array is not running."
            log ""
            log "To fix this, run the script with --force-cleanup:"
            log "  sudo $0 --force-cleanup"
            log ""
            log "Or manually clean up with:"
            log "1. Stop any arrays: mdadm --stop --scan"
            log "2. Zero superblocks: mdadm --zero-superblock --force ${device}p1"
            log "3. Wipe signatures: wipefs -af $device"
            exit 1
        fi
    done
}

# Function to ensure devices are not in use
ensure_devices_free() {
    local device=$1

    log "Cleaning up device $device"

    # Kill any processes using the device
    fuser -k "${device}"* 2>/dev/null || true

    # Force unmount any partitions
    for part in "${device}"*; do
        if mount | grep -q "$part"; then
            umount -f "$part" 2>/dev/null || true
        fi
    done

    # Stop any MD arrays using this device
    for md in $(ls /dev/md* 2>/dev/null || true); do
        if mdadm --detail "$md" 2>/dev/null | grep -q "$device"; then
            mdadm --stop "$md" 2>/dev/null || true
        fi
    done

    # Clear MD superblock
    mdadm --zero-superblock --force "${device}"* 2>/dev/null || true

    # Remove LVM PV if exists
    pvremove -ff -y "$device" 2>/dev/null || true

    # Clear all signatures
    wipefs -af "$device" 2>/dev/null || true

    # Delete partition table
    dd if=/dev/zero of="$device" bs=512 count=2048 2>/dev/null || true
    dd if=/dev/zero of="$device" bs=512 seek=$(( $(blockdev --getsz "$device") - 2048 )) count=2048 2>/dev/null || true

    # Force kernel to reread
    blockdev --rereadpt "$device" 2>/dev/null || true
    partprobe -s "$device" 2>/dev/null || true
    sleep 2
}

# Main script
main() {
    log "Starting RAID setup script"

    # Check if running as root
    check_root

    # If force cleanup flag is set, do aggressive cleanup first
    if [ "$FORCE_CLEANUP" = true ]; then
        force_cleanup_raid
    fi

    # Check for existing RAID setup
    check_existing_raid

    # Clean up any existing MD arrays
    log "Cleaning up existing MD arrays"
    mdadm --stop --scan 2>/dev/null || true

    # Clear mdadm configuration
    log "Clearing mdadm configuration"
    echo "DEVICE partitions" > /etc/mdadm.conf

    # Clean and prepare devices
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        ensure_devices_free "$device"

        log "Creating new partition table on $device"
        sgdisk -Z "$device"
        sgdisk -o "$device"

        log "Creating RAID partition"
        sgdisk -n 1:0:0 -t 1:fd00 "$device"

        partprobe "$device"
        udevadm settle
        sleep 5
    done

    log "Final verification of partition availability"
    if ! [ -b "/dev/nvme0n1p1" ] || ! [ -b "/dev/nvme1n1p1" ]; then
        log "Error: Partitions not available after creation"
        exit 1
    fi

    log "Creating RAID array"
    mdadm --create "$RAID_DEVICE" --level=1 --raid-devices=2 \
        --metadata=1.2 \
        /dev/nvme0n1p1 /dev/nvme1n1p1 \
        --force --run

    log "Creating XFS filesystem"
    mkfs.xfs -f "$RAID_DEVICE"

    log "Creating mount point"
    mkdir -p /nsm

    log "Updating fstab"
    sed -i "\|${RAID_DEVICE}|d" /etc/fstab
    echo "${RAID_DEVICE} ${MOUNT_POINT} xfs defaults,nofail 0 0" >> /etc/fstab

    log "Reloading systemd daemon"
    systemctl daemon-reload

    log "Mounting filesystem"
    mount -a

    log "Saving RAID configuration"
    mdadm --detail --scan > /etc/mdadm.conf

    log "RAID setup complete"
    log "RAID array details:"
    mdadm --detail "$RAID_DEVICE"

    if grep -q "resync" /proc/mdstat; then
        log "RAID is currently resyncing. You can monitor progress with:"
        log "watch -n 60 cat /proc/mdstat"
    fi
}

# Run main function
main "$@"
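After a successful run, the result can be sanity-checked with standard tooling (commands shown for illustration; they match what the script itself prints):

    cat /proc/mdstat               # array state and any resync progress
    sudo mdadm --detail /dev/md0   # members, RAID level, and sync status
    df -h /nsm                     # confirm the XFS filesystem is mounted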
424
salt/hypervisor/tools/sbin/so-qcow2-network-predictable
Normal file
@@ -0,0 +1,424 @@
#!/usr/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

"""
Script for configuring network interface predictability in Security Onion VMs.
This script modifies the necessary files to ensure consistent network interface naming.

The script performs the following operations:
1. Modifies the BLS entry to set net.ifnames=1
2. Removes any existing persistent network rules
3. Updates GRUB configuration

**Usage:**
    so-qcow2-network-predictable -n <domain_name> [-I <qcow2_image_path>]

**Options:**
    -n, --name   Domain name of the VM to configure
    -I, --image  (Optional) Path to the QCOW2 image. If not provided,
                 defaults to /nsm/libvirt/images/<domain_name>/<domain_name>.qcow2

**Examples:**

1. **Configure using domain name:**
   ```bash
   so-qcow2-network-predictable -n sool9
   ```
   This command will:
   - Use default image path: /nsm/libvirt/images/sool9/sool9.qcow2
   - Configure network interface predictability

2. **Configure using custom image path:**
   ```bash
   so-qcow2-network-predictable -n sool9 -I /path/to/custom/image.qcow2
   ```
   This command will:
   - Use the specified image path
   - Configure network interface predictability

**Notes:**
- The VM must not be running when executing this script
- Requires root privileges
- Will automatically find and modify the appropriate BLS entry
- Removes /etc/udev/rules.d/70-persistent-net.rules if it exists
- Updates GRUB configuration after changes

**Exit Codes:**
- 0: Success
- 1: General error (invalid arguments, file operations, etc.)
- 2: VM is running
- 3: Required files not found
- 4: Permission denied

**Logging:**
- Logs are written to /opt/so/log/hypervisor/so-qcow2-network-predictable.log
- Both file and console logging are enabled
- Log entries include:
  - Timestamps
  - Operation details
  - Error messages
  - Configuration changes
"""

import argparse
import guestfs
import glob
import libvirt
import logging
import os
import re
import sys
from so_logging_utils import setup_logging

# Set up logging
logger = setup_logging(
    logger_name='so-qcow2-network-predictable',
    log_file_path='/opt/so/log/hypervisor/so-qcow2-network-predictable.log',
    log_level=logging.INFO,
    format_str='%(asctime)s - %(levelname)s - %(message)s'
)


def check_domain_status(domain_name):
    """
    Check if the specified domain exists and is not running.

    Args:
        domain_name (str): Name of the libvirt domain to check

    Returns:
        bool: True if domain exists and is not running, False otherwise

    Raises:
        RuntimeError: If domain is running or connection to libvirt fails
    """
    try:
        conn = libvirt.open('qemu:///system')
        try:
            dom = conn.lookupByName(domain_name)
            is_running = dom.isActive()
            if is_running:
                logger.error(f"Domain '{domain_name}' is running - cannot modify configuration")
                raise RuntimeError(f"Domain '{domain_name}' must not be running")
            logger.info(f"Domain '{domain_name}' exists and is not running")
            return True
        except libvirt.libvirtError as e:
            if "no domain with matching name" in str(e):
                logger.error(f"Domain '{domain_name}' not found")
                raise RuntimeError(f"Domain '{domain_name}' not found")
            raise
        finally:
            conn.close()
    except libvirt.libvirtError as e:
        logger.error(f"Failed to connect to libvirt: {e}")
        raise RuntimeError(f"Failed to connect to libvirt: {e}")


def modify_bls_entry(g):
    """
    Find and modify the BLS entry to set net.ifnames=1.

    Args:
        g: Mounted guestfs handle

    Returns:
        bool: True if successful, False if no changes needed

    Raises:
        RuntimeError: If BLS entry cannot be found or modified
    """
    bls_dir = "/boot/loader/entries"
    logger.info(f"Checking BLS directory: {bls_dir}")
    if g.is_dir(bls_dir):
        logger.info("BLS directory exists")
    else:
        logger.info("Listing /boot contents:")
        try:
            boot_contents = g.ls("/boot")
            logger.info(f"/boot contains: {boot_contents}")
            if g.is_dir("/boot/loader"):
                logger.info("Listing /boot/loader contents:")
                loader_contents = g.ls("/boot/loader")
                logger.info(f"/boot/loader contains: {loader_contents}")
        except Exception as e:
            logger.error(f"Error listing /boot contents: {e}")
        raise RuntimeError(f"BLS directory not found: {bls_dir}")

    # Find BLS entry file
    entries = g.glob_expand(f"{bls_dir}/*.conf")
    logger.info(f"Found BLS entries: {entries}")
    if not entries:
        logger.error("No BLS entry files found")
        raise RuntimeError("No BLS entry files found")

    # Use the first entry found
    bls_file = entries[0]
    logger.info(f"Found BLS entry file: {bls_file}")

    try:
        logger.info(f"Reading BLS file contents from: {bls_file}")
        content = g.read_file(bls_file).decode('utf-8')
        logger.info("Current BLS file content:")
        logger.info("---BEGIN BLS CONTENT---")
        logger.info(content)
        logger.info("---END BLS CONTENT---")

        lines = content.splitlines()
        modified = False

        for i, line in enumerate(lines):
            if line.startswith('options '):
                logger.info(f"Found options line: {line}")

                # First remove any existing net.ifnames parameters (both =0 and =1)
                new_line = re.sub(r'\s*net\.ifnames=[01]\s*', ' ', line)
                # Also remove any quoted versions
                new_line = re.sub(r'\s*"net\.ifnames=[01]"\s*', ' ', new_line)
                # Clean up multiple spaces
                new_line = re.sub(r'\s+', ' ', new_line).strip()

                # Now add net.ifnames=1 at the end
                new_line = f"{new_line} net.ifnames=1"

                if new_line != line:
                    lines[i] = new_line
                    modified = True
                    logger.info(f"Updated options line. New line: {new_line}")
                break

        if modified:
            new_content = '\n'.join(lines) + '\n'
            logger.info("New BLS file content:")
            logger.info("---BEGIN NEW BLS CONTENT---")
            logger.info(new_content)
            logger.info("---END NEW BLS CONTENT---")
            g.write(bls_file, new_content.encode('utf-8'))
            logger.info("Successfully updated BLS entry")
            return True

        logger.info("No changes needed for BLS entry")
        return False

    except Exception as e:
        logger.error(f"Failed to modify BLS entry: {e}")
        raise RuntimeError(f"Failed to modify BLS entry: {e}")


def remove_persistent_net_rules(g):
    """
    Remove the persistent network rules file if it exists.

    Args:
        g: Mounted guestfs handle

    Returns:
        bool: True if file was removed, False if it didn't exist
    """
    rules_file = "/etc/udev/rules.d/70-persistent-net.rules"
    logger.info(f"Checking for persistent network rules file: {rules_file}")
    try:
        if g.is_file(rules_file):
            logger.info("Found persistent network rules file, removing...")
            g.rm(rules_file)
            logger.info(f"Successfully removed persistent network rules file: {rules_file}")
            return True
        logger.info("No persistent network rules file found")
        return False
    except Exception as e:
        logger.error(f"Failed to remove persistent network rules: {e}")
        raise RuntimeError(f"Failed to remove persistent network rules: {e}")


def update_grub_config(g):
    """
    Update GRUB configuration.

    Args:
        g: Mounted guestfs handle

    Raises:
        RuntimeError: If GRUB update fails
    """
    try:
        # First, read the current grubenv to get the existing kernelopts
        logger.info("Reading current grubenv...")
        grubenv_content = g.read_file('/boot/grub2/grubenv').decode('utf-8')
        logger.info("Current grubenv content:")
        logger.info(grubenv_content)

        # Extract current kernelopts
        kernelopts_match = re.search(r'^kernelopts="([^"]+)"', grubenv_content, re.MULTILINE)
        if kernelopts_match:
            current_kernelopts = kernelopts_match.group(1)
            logger.info(f"Current kernelopts: {current_kernelopts}")

            # Remove any existing net.ifnames parameters
            new_kernelopts = re.sub(r'\s*net\.ifnames=[01]\s*', ' ', current_kernelopts)
            # Clean up multiple spaces
            new_kernelopts = re.sub(r'\s+', ' ', new_kernelopts).strip()
            # Add net.ifnames=1
            new_kernelopts = f"{new_kernelopts} net.ifnames=1"

            logger.info(f"New kernelopts: {new_kernelopts}")

            # Update grubenv with the new kernelopts
            logger.info("Setting kernelopts with net.ifnames=1...")
            output_editenv = g.command(['grub2-editenv', '-', 'set', f'kernelopts={new_kernelopts}'])
            logger.info("grub2-editenv output:")
            logger.info(output_editenv)
        else:
            # If we can't find existing kernelopts, use the default
            logger.warning("Could not find existing kernelopts, using default")
            output_editenv = g.command(['grub2-editenv', '-', 'set', 'kernelopts=console=tty0 no_timer_check biosdevname=0 resume=/dev/mapper/vg_main-lv_swap rd.lvm.lv=vg_main/lv_root rd.lvm.lv=vg_main/lv_swap net.ifnames=1 crashkernel=1G-64G:448M,64G-:512M'])
            logger.info("grub2-editenv output:")
            logger.info(output_editenv)

        logger.info("Updating grubby with net.ifnames=1...")
        # First remove any existing net.ifnames arguments
        output_grubby_remove = g.command(['grubby', '--update-kernel=ALL', '--remove-args=net.ifnames=0 net.ifnames=1'])
        logger.info("grubby remove output:")
        logger.info(output_grubby_remove)

        # Then add net.ifnames=1
        output_grubby_add = g.command(['grubby', '--update-kernel=ALL', '--args=net.ifnames=1'])
        logger.info("grubby add output:")
        logger.info(output_grubby_add)

        logger.info("Updating GRUB configuration...")
        output_mkconfig = g.command(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])
        logger.info("GRUB update output:")
        logger.info(output_mkconfig)
        logger.info("Successfully updated GRUB configuration")
    except Exception as e:
        logger.error(f"Failed to update GRUB configuration: {e}")
        raise RuntimeError(f"Failed to update GRUB configuration: {e}")


def configure_network_predictability(domain_name, image_path=None):
    """
    Configure network interface predictability for a VM.

    Args:
        domain_name (str): Name of the domain to configure
        image_path (str, optional): Path to the QCOW2 image

    Raises:
        RuntimeError: If configuration fails
    """
    # Check domain status
    check_domain_status(domain_name)

    # Use default image path if none provided
    if not image_path:
        image_path = f"/nsm/libvirt/images/{domain_name}/{domain_name}.qcow2"

    if not os.path.exists(image_path):
        logger.error(f"Image file not found: {image_path}")
        raise RuntimeError(f"Image file not found: {image_path}")

    if not os.access(image_path, os.R_OK | os.W_OK):
        logger.error(f"Permission denied: Cannot access image file {image_path}")
        raise RuntimeError(f"Permission denied: Cannot access image file {image_path}")

    logger.info(f"Configuring network predictability for domain: {domain_name}")
    logger.info(f"Using image: {image_path}")

    g = guestfs.GuestFS(python_return_dict=True)
    try:
        logger.info("Initializing guestfs...")
        g.set_network(False)
        g.selinux = False
        g.add_drive_opts(image_path, format="qcow2")
        g.launch()

        logger.info("Inspecting operating system...")
        roots = g.inspect_os()
        if not roots:
            raise RuntimeError("No operating system found in image")

        root = roots[0]
        logger.info(f"Found root filesystem: {root}")
        logger.info(f"Operating system type: {g.inspect_get_type(root)}")
        logger.info(f"Operating system distro: {g.inspect_get_distro(root)}")
        logger.info(f"Operating system major version: {g.inspect_get_major_version(root)}")
        logger.info(f"Operating system minor version: {g.inspect_get_minor_version(root)}")

        logger.info("Getting mount points...")
        mountpoints = g.inspect_get_mountpoints(root)
        logger.info(f"Found mount points: {mountpoints}")
        logger.info("Converting mount points to sortable list...")
        # Convert dictionary to list of tuples
        mountpoints = list(mountpoints.items())
        logger.info(f"Converted mount points: {mountpoints}")
        logger.info("Sorting mount points by path length for proper mount order...")
        mountpoints.sort(key=lambda m: len(m[0]))
        logger.info(f"Mount order will be: {[mp[0] for mp in mountpoints]}")

        for mp_path, mp_device in mountpoints:
            try:
                logger.info(f"Attempting to mount {mp_device} at {mp_path}")
                g.mount(mp_device, mp_path)
                logger.info(f"Successfully mounted {mp_device} at {mp_path}")
            except Exception as e:
                logger.warning(f"Could not mount {mp_device} at {mp_path}: {str(e)}")
                # Continue with other mounts

        # Perform configuration steps
        bls_modified = modify_bls_entry(g)
        rules_removed = remove_persistent_net_rules(g)

        if bls_modified or rules_removed:
            update_grub_config(g)
            logger.info("Network predictability configuration completed successfully")
        else:
            logger.info("No changes were necessary")

    except Exception as e:
        raise RuntimeError(f"Failed to configure network predictability: {e}")
    finally:
        try:
            logger.info("Unmounting all filesystems...")
            g.umount_all()
            logger.info("Successfully unmounted all filesystems")
        except Exception as e:
            logger.warning(f"Error unmounting filesystems: {e}")
        g.close()


def parse_arguments():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Configure network interface predictability for Security Onion VMs"
    )
    parser.add_argument("-n", "--name", required=True,
                        help="Domain name of the VM to configure")
    parser.add_argument("-I", "--image",
                        help="Path to the QCOW2 image (optional)")
    return parser.parse_args()


def main():
    """Main entry point for the script."""
    try:
        args = parse_arguments()
        configure_network_predictability(args.name, args.image)
        sys.exit(0)
    except RuntimeError as e:
        if "must not be running" in str(e):
            logger.error(str(e))
            sys.exit(2)
        elif "not found" in str(e):
            logger.error(str(e))
            sys.exit(3)
        elif "Permission denied" in str(e):
            logger.error(str(e))
            sys.exit(4)
        else:
            logger.error(str(e))
            sys.exit(1)
    except KeyboardInterrupt:
        logger.error("Operation cancelled by user")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
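so_logging_utils is imported above but is not part of this diff. A minimal sketch of a setup_logging helper compatible with the call site (the signature is inferred from the arguments used here, so treat the whole block as an assumption):

    import logging
    import os

    def setup_logging(logger_name, log_file_path, log_level=logging.INFO,
                      format_str='%(asctime)s - %(levelname)s - %(message)s'):
        # Hypothetical reconstruction: the real helper ships outside this diff.
        os.makedirs(os.path.dirname(log_file_path), exist_ok=True)
        logger = logging.getLogger(logger_name)
        logger.setLevel(log_level)
        formatter = logging.Formatter(format_str)
        # Log to both the file and the console, as the docstring above describes.
        for handler in (logging.FileHandler(log_file_path), logging.StreamHandler()):
            handler.setFormatter(formatter)
            logger.addHandler(handler)
        return logger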
Some files were not shown because too many files have changed in this diff.