Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2026-01-23 16:33:29 +01:00)

Compare commits: 2.4.160-20 ... 2.4.190-20 (710 commits)
.github/.gitleaks.toml (vendored): 3 changes

@@ -541,5 +541,6 @@ paths = [
     '''gitleaks.toml''',
     '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
     '''(go.mod|go.sum)$''',
-    '''salt/nginx/files/enterprise-attack.json'''
+    '''salt/nginx/files/enterprise-attack.json''',
+    '''(.*?)whl$'''
 ]
.github/DISCUSSION_TEMPLATE/2-4.yml (vendored): 3 changes

@@ -29,6 +29,9 @@ body:
         - 2.4.141
         - 2.4.150
         - 2.4.160
+        - 2.4.170
+        - 2.4.180
+        - 2.4.190
         - Other (please provide detail below)
     validations:
       required: true
.gitignore (vendored): 3 changes

@@ -1,4 +1,3 @@
-
 # Created by https://www.gitignore.io/api/macos,windows
 # Edit at https://www.gitignore.io/?templates=macos,windows
 
@@ -67,4 +66,4 @@ __pycache__
 
 # Analyzer dev/test config files
 *_dev.yaml
-site-packages
+site-packages
@@ -1,17 +1,17 @@
-### 2.4.160-20250625 ISO image released on 2025/06/25
+### 2.4.190-20251024 ISO image released on 2025/10/24
 
 
 ### Download and Verify
 
-2.4.160-20250625 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.160-20250625.iso
+2.4.190-20251024 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
 
-MD5: 78CF5602EFFAB84174C56AD2826E6E4E
-SHA1: FC7EEC3EC95D97D3337501BAA7CA8CAE7C0E15EA
-SHA256: 0ED965E8BEC80EE16AE90A0F0F96A3046CEF2D92720A587278DDDE3B656C01C2
+MD5: 25358481FB876226499C011FC0710358
+SHA1: 0B26173C0CE136F2CA40A15046D1DFB78BCA1165
+SHA256: 4FD9F62EDA672408828B3C0C446FE5EA9FF3C4EE8488A7AB1101544A3C487872
 
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.160-20250625.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
 
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
 
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.160-20250625.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
 ```
 
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.160-20250625.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
 ```
 
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.160-20250625.iso.sig securityonion-2.4.160-20250625.iso
+gpg --verify securityonion-2.4.190-20251024.iso.sig securityonion-2.4.190-20251024.iso
 ```
 
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Wed 25 Jun 2025 10:13:33 AM EDT using RSA key ID FE507013
+gpg: Signature made Thu 23 Oct 2025 07:21:46 AM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
pillar/hypervisor/nodes.sls (new file, 34 lines)

@@ -0,0 +1,34 @@
{% set node_types = {} %}
{% for minionid, ip in salt.saltutil.runner(
    'mine.get',
    tgt='G@role:so-hypervisor or G@role:so-managerhype',
    fun='network.ip_addrs',
    tgt_type='compound') | dictsort()
%}

# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0 %}
{% set hostname = minionid.split('_') | first %}
{% set node_type = minionid.split('_') | last %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}


hypervisor:
  nodes:
{% for node_type, values in node_types.items() %}
    {{node_type}}:
{% for hostname, ip in values.items() %}
      {{hostname}}:
        ip: {{ip}}
{% endfor %}
{% endfor %}
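For orientation, a sketch of what this template renders: assuming two hypothetical minions, hv1_hypervisor and mgr1_managerhype, each returning one IP from the mine, the resulting pillar data would be shaped like this:

```yaml
# Hypothetical render of pillar/hypervisor/nodes.sls; names and addresses are illustrative.
hypervisor:
  nodes:
    hypervisor:
      hv1:
        ip: 10.0.0.11
    managerhype:
      mgr1:
        ip: 10.0.0.10
```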
@@ -18,6 +18,7 @@ base:
     - telegraf.adv_telegraf
     - versionlock.soc_versionlock
     - versionlock.adv_versionlock
+    - soc.license
 
   '* and not *_desktop':
     - firewall.soc_firewall
@@ -25,7 +26,12 @@ base:
     - nginx.soc_nginx
     - nginx.adv_nginx
 
-  '*_manager or *_managersearch':
+  'salt-cloud:driver:libvirt':
+    - match: grain
+    - vm.soc_vm
+    - vm.adv_vm
+
+  '*_manager or *_managersearch or *_managerhype':
+    - match: compound
     - node_data.ips
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
@@ -44,7 +50,6 @@ base:
     - logstash.adv_logstash
     - soc.soc_soc
     - soc.adv_soc
-    - soc.license
     - kibana.soc_kibana
     - kibana.adv_kibana
     - kratos.soc_kratos
@@ -70,6 +75,9 @@ base:
     - kafka.nodes
     - kafka.soc_kafka
     - kafka.adv_kafka
+    - hypervisor.nodes
+    - hypervisor.soc_hypervisor
+    - hypervisor.adv_hypervisor
     - stig.soc_stig
 
   '*_sensor':
@@ -87,7 +95,6 @@ base:
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
-    - soc.license
 
   '*_eval':
     - node_data.ips
@@ -114,7 +121,6 @@ base:
     - idstools.adv_idstools
     - soc.soc_soc
     - soc.adv_soc
-    - soc.license
     - kibana.soc_kibana
     - kibana.adv_kibana
     - strelka.soc_strelka
@@ -174,7 +180,6 @@ base:
     - manager.adv_manager
     - soc.soc_soc
     - soc.adv_soc
-    - soc.license
     - kibana.soc_kibana
     - kibana.adv_kibana
     - strelka.soc_strelka
@@ -240,7 +245,6 @@ base:
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
-    - soc.license
     - kafka.nodes
     - kafka.soc_kafka
     - kafka.adv_kafka
@@ -258,8 +262,9 @@ base:
     - minions.adv_{{ grains.id }}
     - kafka.nodes
     - kafka.soc_kafka
     - kafka.adv_kafka
-    - soc.license
     - stig.soc_stig
+    - elasticfleet.soc_elasticfleet
+    - elasticfleet.adv_elasticfleet
 
   '*_import':
     - node_data.ips
@@ -283,7 +288,6 @@ base:
     - manager.adv_manager
     - soc.soc_soc
     - soc.adv_soc
-    - soc.license
     - kibana.soc_kibana
     - kibana.adv_kibana
     - backup.soc_backup
@@ -318,9 +322,15 @@ base:
     - elasticfleet.adv_elasticfleet
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
 
+  '*_hypervisor':
+    - minions.{{ grains.id }}
+    - minions.adv_{{ grains.id }}
+    - stig.soc_stig
+
   '*_desktop':
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig
+    - soc.license
salt/_modules/hypervisor.py (new file, 91 lines)

@@ -0,0 +1,91 @@
#!/opt/saltstack/salt/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

"""
Salt execution module for hypervisor operations.

This module provides functions for managing hypervisor configurations,
including VM file management.
"""

import json
import logging
import os

log = logging.getLogger(__name__)

__virtualname__ = 'hypervisor'


def __virtual__():
    """
    Only load this module if we're on a system that can manage hypervisors.
    """
    return __virtualname__


def remove_vm_from_vms_file(vms_file_path, vm_hostname, vm_role):
    """
    Remove a VM entry from the hypervisorVMs file.

    Args:
        vms_file_path (str): Path to the hypervisorVMs file
        vm_hostname (str): Hostname of the VM to remove (without role suffix)
        vm_role (str): Role of the VM

    Returns:
        dict: Result dictionary with success status and message

    CLI Example:
        salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
    """
    try:
        # Check if file exists
        if not os.path.exists(vms_file_path):
            msg = f"VMs file not found: {vms_file_path}"
            log.error(msg)
            return {'result': False, 'comment': msg}

        # Read current VMs
        with open(vms_file_path, 'r') as f:
            content = f.read().strip()
            vms = json.loads(content) if content else []

        # Find and remove the VM entry
        original_count = len(vms)
        vms = [vm for vm in vms if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]

        if len(vms) < original_count:
            # VM was found and removed, write back to file
            with open(vms_file_path, 'w') as f:
                json.dump(vms, f, indent=2)

            # Set socore:socore ownership (939:939)
            os.chown(vms_file_path, 939, 939)

            msg = f"Removed VM {vm_hostname}_{vm_role} from {vms_file_path}"
            log.info(msg)
            return {'result': True, 'comment': msg}
        else:
            msg = f"VM {vm_hostname}_{vm_role} not found in {vms_file_path}"
            log.warning(msg)
            return {'result': False, 'comment': msg}

    except json.JSONDecodeError as e:
        msg = f"Failed to parse JSON in {vms_file_path}: {str(e)}"
        log.error(msg)
        return {'result': False, 'comment': msg}
    except Exception as e:
        msg = f"Failed to remove VM {vm_hostname}_{vm_role} from {vms_file_path}: {str(e)}"
        log.error(msg)
        return {'result': False, 'comment': msg}
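The function only inspects the hostname and role keys of each entry, so the filter it applies can be shown in isolation; the file contents below are hypothetical:

```python
import json

# Hypothetical entries from a hypervisorVMs file (a JSON list of VM dicts).
vms = [
    {"hostname": "node1", "role": "nsm"},
    {"hostname": "node2", "role": "sensor"},
]

# Equivalent of the filter inside remove_vm_from_vms_file(path, 'node1', 'nsm'):
vms = [vm for vm in vms
       if not (vm.get("hostname") == "node1" and vm.get("role") == "nsm")]
print(json.dumps(vms, indent=2))  # only the node2/sensor entry remains
```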
salt/_modules/qcow2.py (new file, 335 lines)

@@ -0,0 +1,335 @@
#!py

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

"""
Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
for modifying network configurations within QCOW2 images, adjusting virtual machine hardware settings, and
creating virtual storage volumes. It serves as a Salt interface to the so-qcow2-modify-network,
so-kvm-modify-hardware, and so-kvm-create-volume scripts.

The module offers three main capabilities:
1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
3. Volume Management: Create and attach virtual storage volumes for NSM data

This module is intended to work with Security Onion's virtualization infrastructure and is typically
used in conjunction with salt-cloud for VM provisioning and management.
"""

import logging
import subprocess
import shlex

log = logging.getLogger(__name__)

__virtualname__ = 'qcow2'

def __virtual__():
    return __virtualname__

def modify_network_config(image, interface, mode, vm_name, ip4=None, gw4=None, dns4=None, search4=None):
    '''
    Usage:
        salt '*' qcow2.modify_network_config image=<path> interface=<iface> mode=<mode> vm_name=<name> [ip4=<addr>] [gw4=<addr>] [dns4=<servers>] [search4=<domain>]

    Options:
        image
            Path to the QCOW2 image file that will be modified
        interface
            Network interface name to configure (e.g., 'enp1s0')
        mode
            Network configuration mode, either 'dhcp4' or 'static4'
        vm_name
            Full name of the VM (hostname_role)
        ip4
            IPv4 address with CIDR notation (e.g., '192.168.1.10/24')
            Required when mode='static4'
        gw4
            IPv4 gateway address (e.g., '192.168.1.1')
            Required when mode='static4'
        dns4
            Comma-separated list of IPv4 DNS servers (e.g., '8.8.8.8,8.8.4.4')
            Optional for both DHCP and static configurations
        search4
            DNS search domain for IPv4 (e.g., 'example.local')
            Optional for both DHCP and static configurations

    Examples:
        1. **Configure DHCP:**
           ```bash
           salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='dhcp4'
           ```
           This configures enp1s0 to use DHCP for IP assignment

        2. **Configure Static IP:**
           ```bash
           salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='static4' ip4='192.168.1.10/24' gw4='192.168.1.1' dns4='192.168.1.1,8.8.8.8' search4='example.local'
           ```
           This sets a static IP configuration with DNS servers and search domain

    Notes:
        - The QCOW2 image must be accessible and writable by the salt minion
        - The image should not be in use by a running VM when modified
        - Network changes take effect on next VM boot
        - Requires so-qcow2-modify-network script to be installed

    Description:
        This function modifies network configuration within a QCOW2 image file by executing
        the so-qcow2-modify-network script. It supports both DHCP and static IPv4 configuration.
        The script mounts the image, modifies the network configuration files, and unmounts
        safely. All operations are logged for troubleshooting purposes.

    Exit Codes:
        0: Success
        1: Invalid parameters or configuration
        2: Image access or mounting error
        3: Network configuration error
        4: System command error
        255: Unexpected error

    Logging:
        - All operations are logged to the salt minion log
        - Log entries are prefixed with 'qcow2 module:'
        - Error conditions include detailed error messages and stack traces
        - Success/failure status is logged for verification
    '''

    cmd = ['/usr/sbin/so-qcow2-modify-network', '-I', image, '-i', interface, '-n', vm_name]

    if mode.lower() == 'dhcp4':
        cmd.append('--dhcp4')
    elif mode.lower() == 'static4':
        cmd.append('--static4')
        if not ip4 or not gw4:
            raise ValueError('Both ip4 and gw4 are required for static configuration.')
        cmd.extend(['--ip4', ip4, '--gw4', gw4])
        if dns4:
            cmd.extend(['--dns4', dns4])
        if search4:
            cmd.extend(['--search4', search4])
    else:
        raise ValueError("Invalid mode '{}'. Expected 'dhcp4' or 'static4'.".format(mode))

    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
        ret = {
            'retcode': result.returncode,
            'stdout': result.stdout,
            'stderr': result.stderr
        }
        if result.returncode != 0:
            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
        else:
            log.info('qcow2 module: Script executed successfully.')
        return ret
    except Exception as e:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
        raise

def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False):
    '''
    Usage:
        salt '*' qcow2.modify_hardware_config vm_name=<name> [cpu=<count>] [memory=<size>] [pci=<id>] [pci=<id>] [start=<bool>]

    Options:
        vm_name
            Name of the virtual machine to modify
        cpu
            Number of virtual CPUs to assign (positive integer)
            Optional - VM's current CPU count retained if not specified
        memory
            Amount of memory to assign in MiB (positive integer)
            Optional - VM's current memory size retained if not specified
        pci
            PCI hardware ID(s) to passthrough to the VM (e.g., '0000:c7:00.0')
            Can be specified multiple times for multiple devices
            Optional - no PCI passthrough if not specified
        start
            Boolean flag to start the VM after modification
            Optional - defaults to False

    Examples:
        1. **Modify CPU and Memory:**
           ```bash
           salt '*' qcow2.modify_hardware_config vm_name='sensor1' cpu=4 memory=8192
           ```
           This assigns 4 CPUs and 8GB memory to the VM

        2. **Enable PCI Passthrough:**
           ```bash
           salt '*' qcow2.modify_hardware_config vm_name='sensor1' pci='0000:c7:00.0' pci='0000:c4:00.0' start=True
           ```
           This configures PCI passthrough and starts the VM

        3. **Complete Hardware Configuration:**
           ```bash
           salt '*' qcow2.modify_hardware_config vm_name='sensor1' cpu=8 memory=16384 pci='0000:c7:00.0' start=True
           ```
           This sets CPU, memory, PCI passthrough, and starts the VM

    Notes:
        - VM must be stopped before modification unless only the start flag is set
        - Memory is specified in MiB (1024 = 1GB)
        - PCI devices must be available and not in use by the host
        - CPU count should align with host capabilities
        - Requires so-kvm-modify-hardware script to be installed

    Description:
        This function modifies the hardware configuration of a KVM virtual machine using
        the so-kvm-modify-hardware script. It can adjust CPU count, memory allocation,
        and PCI device passthrough. Changes are applied to the VM's libvirt configuration.
        The VM can optionally be started after modifications are complete.

    Exit Codes:
        0: Success
        1: Invalid parameters
        2: VM state error (running when should be stopped)
        3: Hardware configuration error
        4: System command error
        255: Unexpected error

    Logging:
        - All operations are logged to the salt minion log
        - Log entries are prefixed with 'qcow2 module:'
        - Hardware configuration changes are logged
        - Errors include detailed messages and stack traces
        - Final status of modification is logged
    '''

    cmd = ['/usr/sbin/so-kvm-modify-hardware', '-v', vm_name]

    if cpu is not None:
        if isinstance(cpu, int) and cpu > 0:
            cmd.extend(['-c', str(cpu)])
        else:
            raise ValueError('cpu must be a positive integer.')
    if memory is not None:
        if isinstance(memory, int) and memory > 0:
            cmd.extend(['-m', str(memory)])
        else:
            raise ValueError('memory must be a positive integer.')
    if pci:
        # Handle PCI IDs (can be a single device or comma-separated list)
        if isinstance(pci, str):
            devices = [dev.strip() for dev in pci.split(',') if dev.strip()]
        elif isinstance(pci, list):
            devices = pci
        else:
            devices = [pci]

        # Add each device with its own -p flag
        for device in devices:
            cmd.extend(['-p', str(device)])
    if start:
        cmd.append('-s')

    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
        ret = {
            'retcode': result.returncode,
            'stdout': result.stdout,
            'stderr': result.stderr
        }
        if result.returncode != 0:
            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
        else:
            log.info('qcow2 module: Script executed successfully.')
        return ret
    except Exception as e:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
        raise

def create_volume_config(vm_name, size_gb, start=False):
    '''
    Usage:
        salt '*' qcow2.create_volume_config vm_name=<name> size_gb=<size> [start=<bool>]

    Options:
        vm_name
            Name of the virtual machine to attach the volume to
        size_gb
            Volume size in GB (positive integer)
            This determines the capacity of the virtual storage volume
        start
            Boolean flag to start the VM after volume creation
            Optional - defaults to False

    Examples:
        1. **Create 500GB Volume:**
           ```bash
           salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=500
           ```
           This creates a 500GB virtual volume for NSM storage

        2. **Create 1TB Volume and Start VM:**
           ```bash
           salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=1000 start=True
           ```
           This creates a 1TB volume and starts the VM after attachment

    Notes:
        - VM must be stopped before volume creation
        - Volume is created as a qcow2 image and attached to the VM
        - This is an alternative to disk passthrough via modify_hardware_config
        - Volume is automatically attached to the VM's libvirt configuration
        - Requires so-kvm-create-volume script to be installed
        - Volume files are stored in the hypervisor's VM storage directory

    Description:
        This function creates and attaches a virtual storage volume to a KVM virtual machine
        using the so-kvm-create-volume script. It creates a qcow2 disk image of the specified
        size and attaches it to the VM for NSM (Network Security Monitoring) storage purposes.
        This provides an alternative to physical disk passthrough, allowing flexible storage
        allocation without requiring dedicated hardware. The VM can optionally be started
        after the volume is successfully created and attached.

    Exit Codes:
        0: Success
        1: Invalid parameters
        2: VM state error (running when should be stopped)
        3: Volume creation error
        4: System command error
        255: Unexpected error

    Logging:
        - All operations are logged to the salt minion log
        - Log entries are prefixed with 'qcow2 module:'
        - Volume creation and attachment operations are logged
        - Errors include detailed messages and stack traces
        - Final status of volume creation is logged
    '''

    # Validate size_gb parameter
    if not isinstance(size_gb, int) or size_gb <= 0:
        raise ValueError('size_gb must be a positive integer.')

    cmd = ['/usr/sbin/so-kvm-create-volume', '-v', vm_name, '-s', str(size_gb)]

    if start:
        cmd.append('-S')

    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))

    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
        ret = {
            'retcode': result.returncode,
            'stdout': result.stdout,
            'stderr': result.stderr
        }
        if result.returncode != 0:
            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
        else:
            log.info('qcow2 module: Script executed successfully.')
        return ret
    except Exception as e:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
        raise
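All three wrappers repeat the same subprocess handling; a hypothetical shared helper (not part of the module as committed) could express that pattern once:

```python
import logging
import shlex
import subprocess

log = logging.getLogger(__name__)

def _run_script(cmd):
    """Run one of the so-* helper scripts and return retcode/stdout/stderr.

    Sketch of a refactor of the block repeated in modify_network_config,
    modify_hardware_config, and create_volume_config above."""
    log.info('qcow2 module: Executing command: %s',
             ' '.join(shlex.quote(arg) for arg in cmd))
    result = subprocess.run(cmd, capture_output=True, text=True, check=False)
    if result.returncode != 0:
        log.error('qcow2 module: Script execution failed with return code %s: %s',
                  result.returncode, result.stderr)
    else:
        log.info('qcow2 module: Script executed successfully.')
    return {'retcode': result.returncode,
            'stdout': result.stdout,
            'stderr': result.stderr}
```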
salt/_runners/setup_hypervisor.py (new file, 1092 lines)

File diff suppressed because it is too large.
@@ -1,264 +1,180 @@
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
+{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+https://securityonion.net/license; you may not use this file except in compliance with the
+Elastic License 2.0. #}
 
 {% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
 {% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
 {% set saltversion = saltversion.salt.minion.version %}
 
-{# this is the list we are returning from this map file, it gets built below #}
-{% set allowed_states= [] %}
+{# Define common state groups to reduce redundancy #}
+{% set base_states = [
+    'common',
+    'patch.os.schedule',
+    'motd',
+    'salt.minion-check',
+    'sensoroni',
+    'salt.lasthighstate',
+    'salt.minion'
+] %}
+
+{% set ssl_states = [
+    'ssl',
+    'telegraf',
+    'firewall',
+    'schedule',
+    'docker_clean'
+] %}
+
+{% set manager_states = [
+    'salt.master',
+    'ca',
+    'registry',
+    'manager',
+    'nginx',
+    'influxdb',
+    'soc',
+    'kratos',
+    'hydra',
+    'elasticfleet',
+    'elastic-fleet-package-registry',
+    'idstools',
+    'suricata.manager',
+    'utility'
+] %}
+
+{% set sensor_states = [
+    'pcap',
+    'suricata',
+    'healthcheck',
+    'tcpreplay',
+    'zeek',
+    'strelka'
+] %}
+
+{% set kafka_states = [
+    'kafka'
+] %}
+
+{% set stig_states = [
+    'stig'
+] %}
+
+{% set elastic_stack_states = [
+    'elasticsearch',
+    'elasticsearch.auth',
+    'kibana',
+    'kibana.secrets',
+    'elastalert',
+    'logstash',
+    'redis'
+] %}
+
+{# Initialize the allowed_states list #}
+{% set allowed_states = [] %}
 
 {% if grains.saltversion | string == saltversion | string %}
+{# Map role-specific states #}
+{% set role_states = {
+    'so-eval': (
+        ssl_states +
+        manager_states +
+        sensor_states +
+        elastic_stack_states | reject('equalto', 'logstash') | list
+    ),
+    'so-heavynode': (
+        ssl_states +
+        sensor_states +
+        ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
+    ),
+    'so-idh': (
+        ssl_states +
+        ['idh']
+    ),
+    'so-import': (
+        ssl_states +
+        manager_states +
+        sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
+        ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
+    ),
+    'so-manager': (
+        ssl_states +
+        manager_states +
+        ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
+        stig_states +
+        kafka_states +
+        elastic_stack_states
+    ),
+    'so-managerhype': (
+        ssl_states +
+        manager_states +
+        ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
+        stig_states +
+        kafka_states +
+        elastic_stack_states
+    ),
+    'so-managersearch': (
+        ssl_states +
+        manager_states +
+        ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
+        stig_states +
+        kafka_states +
+        elastic_stack_states
+    ),
+    'so-searchnode': (
+        ssl_states +
+        ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
+        stig_states
+    ),
+    'so-standalone': (
+        ssl_states +
+        manager_states +
+        ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
+        sensor_states +
+        stig_states +
+        kafka_states +
+        elastic_stack_states
+    ),
+    'so-sensor': (
+        ssl_states +
+        sensor_states +
+        ['nginx'] +
+        stig_states
+    ),
+    'so-fleet': (
+        ssl_states +
+        stig_states +
+        ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
+    ),
+    'so-receiver': (
+        ssl_states +
+        kafka_states +
+        stig_states +
+        ['logstash', 'redis']
+    ),
+    'so-hypervisor': (
+        ssl_states +
+        stig_states +
+        ['hypervisor', 'libvirt']
+    ),
+    'so-desktop': (
+        ['ssl', 'docker_clean', 'telegraf'] +
+        stig_states
+    )
+} %}
-{% set allowed_states= salt['grains.filter_by']({
-    'so-eval': [
-        'salt.master',
-        'ca',
-        'ssl',
-        'registry',
-        'manager',
-        'nginx',
-        'telegraf',
-        'influxdb',
-        'soc',
-        'kratos',
-        'hydra',
-        'elasticfleet',
-        'elastic-fleet-package-registry',
-        'firewall',
-        'idstools',
-        'suricata.manager',
-        'healthcheck',
-        'pcap',
-        'suricata',
-        'utility',
-        'schedule',
-        'tcpreplay',
-        'docker_clean'
-    ],
-    'so-heavynode': [
-        'ssl',
-        'nginx',
-        'telegraf',
-        'firewall',
-        'pcap',
-        'suricata',
-        'healthcheck',
-        'elasticagent',
-        'schedule',
-        'tcpreplay',
-        'docker_clean'
-    ],
-    'so-idh': [
-        'ssl',
-        'telegraf',
-        'firewall',
-        'idh',
-        'schedule',
-        'docker_clean'
-    ],
-    'so-import': [
-        'salt.master',
-        'ca',
-        'ssl',
-        'registry',
-        'manager',
-        'nginx',
-        'strelka.manager',
-        'soc',
-        'kratos',
-        'hydra',
-        'influxdb',
-        'telegraf',
-        'firewall',
-        'idstools',
-        'suricata.manager',
-        'pcap',
-        'utility',
-        'suricata',
-        'zeek',
-        'schedule',
-        'tcpreplay',
-        'docker_clean',
-        'elasticfleet',
-        'elastic-fleet-package-registry'
-    ],
-    'so-manager': [
-        'salt.master',
-        'ca',
-        'ssl',
-        'registry',
-        'manager',
-        'nginx',
-        'telegraf',
-        'influxdb',
-        'strelka.manager',
-        'soc',
-        'kratos',
-        'hydra',
-        'elasticfleet',
-        'elastic-fleet-package-registry',
-        'firewall',
-        'idstools',
-        'suricata.manager',
-        'utility',
-        'schedule',
-        'docker_clean',
-        'stig',
-        'kafka'
-    ],
-    'so-managersearch': [
-        'salt.master',
-        'ca',
-        'ssl',
-        'registry',
-        'nginx',
-        'telegraf',
-        'influxdb',
-        'strelka.manager',
-        'soc',
-        'kratos',
-        'hydra',
-        'elastic-fleet-package-registry',
-        'elasticfleet',
-        'firewall',
-        'manager',
-        'idstools',
-        'suricata.manager',
-        'utility',
-        'schedule',
-        'docker_clean',
-        'stig',
-        'kafka'
-    ],
-    'so-searchnode': [
-        'ssl',
-        'nginx',
-        'telegraf',
-        'firewall',
-        'schedule',
-        'docker_clean',
-        'stig',
-        'kafka.ca',
-        'kafka.ssl'
-    ],
-    'so-standalone': [
-        'salt.master',
-        'ca',
-        'ssl',
-        'registry',
-        'manager',
-        'nginx',
-        'telegraf',
-        'influxdb',
-        'soc',
-        'kratos',
-        'hydra',
-        'elastic-fleet-package-registry',
-        'elasticfleet',
-        'firewall',
-        'idstools',
-        'suricata.manager',
-        'pcap',
-        'suricata',
-        'healthcheck',
-        'utility',
-        'schedule',
-        'tcpreplay',
-        'docker_clean',
-        'stig',
-        'kafka'
-    ],
-    'so-sensor': [
-        'ssl',
-        'telegraf',
-        'firewall',
-        'nginx',
-        'pcap',
-        'suricata',
-        'healthcheck',
-        'schedule',
-        'tcpreplay',
-        'docker_clean',
-        'stig'
-    ],
-    'so-fleet': [
-        'ssl',
-        'telegraf',
-        'firewall',
-        'logstash',
-        'nginx',
-        'healthcheck',
-        'schedule',
-        'elasticfleet',
-        'docker_clean'
-    ],
-    'so-receiver': [
-        'ssl',
-        'telegraf',
-        'firewall',
-        'schedule',
-        'docker_clean',
-        'kafka',
-        'stig'
-    ],
-    'so-desktop': [
-        'ssl',
-        'docker_clean',
-        'telegraf',
-        'stig'
-    ],
-}, grain='role') %}
-
-{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
-{% do allowed_states.append('zeek') %}
-{%- endif %}
-
-{% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
-{% do allowed_states.append('strelka') %}
-{% endif %}
-
-{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
-{% do allowed_states.append('elasticsearch') %}
-{% endif %}
-
-{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
-{% do allowed_states.append('elasticsearch.auth') %}
-{% endif %}
-
-{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
-{% do allowed_states.append('kibana') %}
-{% do allowed_states.append('kibana.secrets') %}
-{% endif %}
-
-{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
-{% do allowed_states.append('elastalert') %}
-{% endif %}
-
-{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
-{% do allowed_states.append('logstash') %}
-{% endif %}
-
-{% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
-{% do allowed_states.append('redis') %}
-{% endif %}
-
-{# all nodes on the right salt version can run the following states #}
-{% do allowed_states.append('common') %}
-{% do allowed_states.append('patch.os.schedule') %}
-{% do allowed_states.append('motd') %}
-{% do allowed_states.append('salt.minion-check') %}
-{% do allowed_states.append('sensoroni') %}
-{% do allowed_states.append('salt.lasthighstate') %}
+{# Get states for the current role #}
+{% if grains.role in role_states %}
+{% set allowed_states = role_states[grains.role] %}
+{% endif %}
+
+{# Add base states that apply to all roles #}
+{% for state in base_states %}
+{% do allowed_states.append(state) %}
+{% endfor %}
 {% endif %}
 
+{# Add airgap state if needed #}
 {% if ISAIRGAP %}
 {% do allowed_states.append('airgap') %}
 {% endif %}
-
-{# all nodes can always run salt.minion state #}
-{% do allowed_states.append('salt.minion') %}
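The reworked map derives per-role variants from the shared groups with Jinja's reject filter; for instance, given the elastic_stack_states list defined above, the expression used for so-eval evaluates as sketched here:

```jinja
{# elastic_stack_states | reject('equalto', 'logstash') | list
   evaluates to the same list minus 'logstash':
   ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets',
    'elastalert', 'redis'] #}
```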
@@ -11,6 +11,10 @@ TODAY=$(date '+%Y_%m_%d')
 BACKUPDIR={{ DESTINATION }}
 BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar"
 MAXBACKUPS=7
+EXCLUSIONS=(
+  "--exclude=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers"
+)
+
 
 # Create backup dir if it does not exist
 mkdir -p /nsm/backup
@@ -23,7 +27,7 @@ if [ ! -f $BACKUPFILE ]; then
 
 # Loop through all paths defined in global.sls, and append them to backup file
 {%- for LOCATION in BACKUPLOCATIONS %}
-tar -rf $BACKUPFILE {{ LOCATION }}
+tar -rf $BACKUPFILE "${EXCLUSIONS[@]}" {{ LOCATION }}
 {%- endfor %}
 
 fi
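tar applies --exclude to each path it is asked to append, so the exclusion array suppresses the (potentially large) agent-installer payloads on every loop iteration. A sketch of one expanded iteration, with a hypothetical date and backup location:

```bash
# Append one location to the archive while skipping the excluded subtree.
tar -rf /nsm/backup/so-config-backup-2025_10_24.tar \
    --exclude=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers \
    /opt/so/saltstack/local
```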
salt/common/grains.sls (new file, 21 lines)

@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% set nsm_exists = salt['file.directory_exists']('/nsm') %}
{% if nsm_exists %}
{% set nsm_total = salt['cmd.shell']('df -BG /nsm | tail -1 | awk \'{print $2}\'') %}

nsm_total:
  grains.present:
    - name: nsm_total
    - value: {{ nsm_total }}

{% else %}

nsm_missing:
  test.succeed_without_changes:
    - name: /nsm does not exist, skipping grain assignment

{% endif %}
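The cmd.shell pipeline takes the last line of df output and extracts the second column (the filesystem's total size in gigabyte blocks), so the grain value ends up as a size string such as 500G:

```bash
# Illustrative; the actual figure depends on the /nsm filesystem.
df -BG /nsm | tail -1 | awk '{print $2}'
# -> 500G
```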
@@ -4,6 +4,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 
 include:
+  - common.grains
   - common.packages
 {% if GLOBALS.role in GLOBALS.manager_roles %}
   - manager.elasticsearch # needed for elastic_curl_config state
@@ -106,7 +107,7 @@ Etc/UTC:
     timezone.system
 
 # Sync curl configuration for Elasticsearch authentication
-{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
+{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-searchnode'] %}
 elastic_curl_config:
   file.managed:
     - name: /opt/so/conf/elasticsearch/curl.config
@@ -1,6 +1,6 @@
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-
-{% if GLOBALS.os_family == 'Debian' %}
+# we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init
+# since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined
+{% if grains.os_family == 'Debian' %}
 commonpkgs:
   pkg.installed:
     - skip_suggestions: True
@@ -46,7 +46,7 @@ python-rich:
 {% endif %}
 {% endif %}
 
-{% if GLOBALS.os_family == 'RedHat' %}
+{% if grains.os_family == 'RedHat' %}
 
 remove_mariadb:
   pkg.removed:
@@ -441,8 +441,7 @@ lookup_grain() {
 
 lookup_role() {
   id=$(lookup_grain id)
-  pieces=($(echo $id | tr '_' ' '))
-  echo ${pieces[1]}
+  echo "${id##*_}"
 }
 
 is_feature_enabled() {
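The replacement uses bash's ${var##pattern} expansion, which strips the longest prefix matching the pattern and therefore always returns the last underscore-separated field; unlike the old array split, it stays correct when the hostname itself contains underscores (hypothetical minion id below):

```bash
id="sensor_one_sensor"
echo "${id##*_}"   # -> sensor (everything after the last underscore)

# The old approach returned the second field, which breaks here:
pieces=($(echo $id | tr '_' ' '))
echo ${pieces[1]}  # -> one
```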
@@ -158,6 +158,8 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index lifecycle policy" # false positive (elasticsearch policy names contain 'error')
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
 fi
 
 if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -220,6 +222,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
 fi
 
 RESULT=0
@@ -266,6 +269,13 @@ for log_file in $(cat /tmp/log_check_files); do
   tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
   check_for_errors
 done
+# Look for OOM specific errors in /var/log/messages which can lead to odd behavior / test failures
+if [[ -f /var/log/messages ]]; then
+  status "Checking log file /var/log/messages"
+  if journalctl --since "24 hours ago" | grep -iE 'out of memory|oom-kill'; then
+    RESULT=1
+  fi
+fi
 
 # Cleanup temp files
 rm -f /tmp/log_check_files
53
salt/common/tools/sbin/so_logging_utils.py
Normal file
53
salt/common/tools/sbin/so_logging_utils.py
Normal file
@@ -0,0 +1,53 @@
#!/usr/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

import logging
import os
import sys

def setup_logging(logger_name, log_file_path, log_level=logging.INFO, format_str='%(asctime)s - %(levelname)s - %(message)s'):
    """
    Sets up logging for a script.

    Parameters:
        logger_name (str): The name of the logger.
        log_file_path (str): The file path for the log file.
        log_level (int): The logging level (e.g., logging.INFO, logging.DEBUG).
        format_str (str): The format string for log messages.

    Returns:
        logging.Logger: Configured logger object.
    """
    logger = logging.getLogger(logger_name)
    logger.setLevel(log_level)

    # Create directory for log file if it doesn't exist
    log_file_dir = os.path.dirname(log_file_path)
    if log_file_dir and not os.path.exists(log_file_dir):
        try:
            os.makedirs(log_file_dir)
        except OSError as e:
            print(f"Error creating directory {log_file_dir}: {e}")
            sys.exit(1)

    # Create handlers
    c_handler = logging.StreamHandler()
    f_handler = logging.FileHandler(log_file_path)
    c_handler.setLevel(log_level)
    f_handler.setLevel(log_level)

    # Create formatter and add it to handlers
    formatter = logging.Formatter(format_str)
    c_handler.setFormatter(formatter)
    f_handler.setFormatter(formatter)

    # Add handlers to the logger if they are not already added
    if not logger.hasHandlers():
        logger.addHandler(c_handler)
        logger.addHandler(f_handler)

    return logger
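(A minimal usage sketch of the new helper — the tool name and log path here are illustrative, not from the commit:)

#!/usr/bin/python3
import logging
from so_logging_utils import setup_logging

# Logs go to both the console and the file; the directory is created if missing
log = setup_logging('example-tool', '/opt/so/log/example/example.log', log_level=logging.DEBUG)
log.info("starting checks")
log.debug("verbose detail is written to console and file")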
@@ -173,7 +173,7 @@ for PCAP in $INPUT_FILES; do
  status "- assigning unique identifier to import: $HASH"

  pcap_data=$(pcapinfo "${PCAP}")
  if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then
  if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then
    status "- this PCAP file is invalid; skipping"
    INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
  else

@@ -205,8 +205,8 @@ for PCAP in $INPUT_FILES; do
    HASHES="${HASHES} ${HASH}"
  fi

  START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}')
  END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}')
  START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}')
  END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}')
  status "- found PCAP data spanning dates $START through $END"

  # compare $START to $START_OLDEST
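(These grep targets change because newer capinfos builds label the timestamps "Earliest/Latest packet time" where older ones printed "First/Last packet time" — the rename is evident from the hunk itself. A version-tolerant sketch, not part of the commit:)

# Sketch: accept either label style when extracting the dates
START=$(pcapinfo "${PCAP}" -a | grep -E "(First|Earliest) packet time:" | awk '{print $4}')
END=$(pcapinfo "${PCAP}" -e | grep -E "(Last|Latest) packet time:" | awk '{print $4}')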
@@ -248,7 +248,7 @@ fi
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
  URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
  URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.as.organization.name%20source.geo.country_name%20%7C%20groupby%20destination.as.organization.name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"

  status "Import complete!"
  status
@@ -0,0 +1,132 @@
#!/opt/saltstack/salt/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
#   "You may not move, change, disable, or circumvent the license key functionality
#   in the software, and you may not remove or obscure any functionality in the
#   software that is protected by the license key."

{% if 'vrt' in salt['pillar.get']('features', []) -%}

"""
Script for emitting VM deployment status events to the Salt event bus.

This script provides functionality to emit status events for VM deployment operations,
used by various Security Onion VM management tools.

Usage:
    so-salt-emit-vm-deployment-status-event -v <vm_name> -H <hypervisor> -s <status>

Arguments:
    -v, --vm-name     Name of the VM (hostname_role)
    -H, --hypervisor  Name of the hypervisor
    -s, --status      Current deployment status of the VM

Example:
    so-salt-emit-vm-deployment-status-event -v sensor1_sensor -H hypervisor1 -s "Creating"
"""

import sys
import argparse
import logging
import salt.client
from typing import Dict, Any

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
log = logging.getLogger(__name__)

def emit_event(vm_name: str, hypervisor: str, status: str) -> bool:
    """
    Emit a VM deployment status event to the salt event bus.

    Args:
        vm_name: Name of the VM (hostname_role)
        hypervisor: Name of the hypervisor
        status: Current deployment status of the VM

    Returns:
        bool: True if event was sent successfully, False otherwise

    Raises:
        ValueError: If status is not a valid deployment status
    """
    log.info("Attempting to emit deployment event...")

    try:
        caller = salt.client.Caller()
        event_data = {
            'vm_name': vm_name,
            'hypervisor': hypervisor,
            'status': status
        }

        # Use consistent event tag structure
        event_tag = f'soc/dyanno/hypervisor/{status.lower()}'

        ret = caller.cmd(
            'event.send',
            event_tag,
            event_data
        )

        if not ret:
            log.error("Failed to emit VM deployment status event: %s", event_data)
            return False

        log.info("Successfully emitted VM deployment status event: %s", event_data)
        return True

    except Exception as e:
        log.error("Error emitting VM deployment status event: %s", str(e))
        return False

def parse_args():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description='Emit VM deployment status events to the Salt event bus.'
    )
    parser.add_argument('-v', '--vm-name', required=True,
                        help='Name of the VM (hostname_role)')
    parser.add_argument('-H', '--hypervisor', required=True,
                        help='Name of the hypervisor')
    parser.add_argument('-s', '--status', required=True,
                        help='Current deployment status of the VM')
    return parser.parse_args()

def main():
    """Main entry point for the script."""
    try:
        args = parse_args()

        success = emit_event(
            vm_name=args.vm_name,
            hypervisor=args.hypervisor,
            status=args.status
        )

        if not success:
            sys.exit(1)

    except Exception as e:
        log.error("Failed to emit status event: %s", str(e))
        sys.exit(1)

if __name__ == '__main__':
    main()

{%- else -%}

echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
for more information about purchasing a license to enable this feature."

{% endif -%}
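(To observe the events this script emits, the standard Salt event runner can be pointed at the tag prefix used above — a sketch, assuming a typical Salt master setup:)

# Sketch: watch the Salt event bus for VM deployment status events
salt-run state.event 'soc/dyanno/hypervisor/*' pretty=True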
@@ -9,3 +9,6 @@ fleetartifactdir:
    - user: 947
    - group: 939
    - makedirs: True
    - recurse:
      - user
      - group
@@ -9,6 +9,9 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% set node_data = salt['pillar.get']('node_data') %}

include:
  - elasticfleet.artifact_registry

# Add EA Group
elasticfleetgroup:
  group.present:

@@ -166,7 +169,7 @@ eaoptionalintegrationsdir:

{% for minion in node_data %}
{% set role = node_data[minion]["role"] %}
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
{% if role in [ "eval","fleet","heavynode","import","manager", "managerhype", "managersearch","standalone" ] %}
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
{% set integration_keys = optional_integrations.keys() %}
fleet_server_integrations_{{ minion }}:
@@ -38,6 +38,7 @@ elasticfleet:
      - elasticsearch
      - endpoint
      - fleet_server
      - filestream
      - http_endpoint
      - httpjson
      - log
@@ -67,6 +67,8 @@ so-elastic-fleet-auto-configure-artifact-urls:
elasticagent_syncartifacts:
  file.recurse:
    - name: /nsm/elastic-fleet/artifacts/beats
    - user: 947
    - group: 947
    - source: salt://beats
{% endif %}
@@ -133,12 +135,18 @@ so-elastic-fleet-package-statefile:
so-elastic-fleet-package-upgrade:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-package-upgrade
    - retry:
        attempts: 3
        interval: 10
    - onchanges:
      - file: /opt/so/state/elastic_fleet_packages.txt

so-elastic-fleet-integrations:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-integration-policy-load
    - retry:
        attempts: 3
        interval: 10

so-elastic-agent-grid-upgrade:
  cmd.run:

@@ -150,7 +158,11 @@ so-elastic-agent-grid-upgrade:
so-elastic-fleet-integration-upgrade:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-integration-upgrade
    - retry:
        attempts: 3
        interval: 10

{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-optional-integrations-load
@@ -1,32 +1,33 @@
{
  "name": "elastic-defend-endpoints",
  "namespace": "default",
  "description": "",
  "package": {
    "name": "endpoint",
    "title": "Elastic Defend",
    "version": "8.17.0",
    "requires_root": true
  },
  "enabled": true,
  "policy_id": "endpoints-initial",
  "vars": {},
  "inputs": [
    {
      "type": "endpoint",
      "enabled": true,
      "config": {
        "integration_config": {
          "value": {
            "type": "endpoint",
            "endpointConfig": {
              "preset": "DataCollection"
            }
          }
        }
      },
      "streams": []
    }
  ]
}

  "name": "elastic-defend-endpoints",
  "namespace": "default",
  "description": "",
  "package": {
    "name": "endpoint",
    "title": "Elastic Defend",
    "version": "8.18.1",
    "requires_root": true
  },
  "enabled": true,
  "policy_ids": [
    "endpoints-initial"
  ],
  "vars": {},
  "inputs": [
    {
      "type": "ENDPOINT_INTEGRATION_CONFIG",
      "enabled": true,
      "config": {
        "_config": {
          "value": {
            "type": "endpoint",
            "endpointConfig": {
              "preset": "DataCollection"
            }
          }
        }
      },
      "streams": []
    }
  ]
}
@@ -0,0 +1,48 @@
{
  "package": {
    "name": "filestream",
    "version": ""
  },
  "name": "agent-monitor",
  "namespace": "",
  "description": "",
  "policy_ids": [
    "so-grid-nodes_general"
  ],
  "output_id": null,
  "vars": {},
  "inputs": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/agents/agent-monitor.log"
            ],
            "data_stream.dataset": "agentmonitor",
            "pipeline": "elasticagent.monitor",
            "parsers": "",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n- add_fields:\n    target: event\n    fields:\n      module: gridmetrics",
            "tags": [],
            "recursive_glob": true,
            "ignore_older": "72h",
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": true,
            "fingerprint_offset": 0,
            "fingerprint_length": 64,
            "file_identity_native": false,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  }
}
@@ -40,7 +40,7 @@
      "enabled": true,
      "vars": {
        "paths": [
          "/opt/so/log/elasticsearch/*.log"
          "/opt/so/log/elasticsearch/*.json"
        ]
      }
    },
@@ -19,7 +19,7 @@
      ],
      "data_stream.dataset": "idh",
      "tags": [],
      "processors": "\n- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n    add_error_key: true\n- drop_fields:\n    when:\n      equals:\n        logtype: \"1001\"\n    fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n    ignore_missing: true\n- rename:\n    fields:\n      - from: \"src_host\"\n        to: \"source.ip\"\n      - from: \"src_port\"\n        to: \"source.port\"\n      - from: \"dst_host\"\n        to: \"destination.host\"\n      - from: \"dst_port\"\n        to: \"destination.port\"\n    ignore_missing: true\n- convert:\n    fields:\n      - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n    ignore_missing: true\n- drop_fields:\n    fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n    target: event\n    fields:\n      category: host\n      module: opencanary",
      "processors": "\n- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n    add_error_key: true\n- convert:\n    fields:\n      - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n    when:\n      equals:\n        event.code: \"1001\"\n    fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n    ignore_missing: true\n- rename:\n    fields:\n      - from: \"src_host\"\n        to: \"source.ip\"\n      - from: \"src_port\"\n        to: \"source.port\"\n      - from: \"dst_host\"\n        to: \"destination.host\"\n      - from: \"dst_port\"\n        to: \"destination.port\"\n    ignore_missing: true\n- drop_fields:\n    fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n    target: event\n    fields:\n      category: host\n      module: opencanary",
      "custom": "pipeline: common"
    }
  }
@@ -20,7 +20,7 @@
      ],
      "data_stream.dataset": "import",
      "custom": "",
      "processors": "- dissect:\n    tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n    field: \"log.file.path\"\n    target_prefix: \"\"\n- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n- drop_fields:\n    fields: [\"host\"]\n    ignore_missing: true\n- add_fields:\n    target: data_stream\n    fields:\n      type: logs\n      dataset: system.security\n- add_fields:\n    target: event\n    fields:\n      dataset: system.security\n      module: system\n      imported: true\n- add_fields:\n    target: \"@metadata\"\n    fields:\n      pipeline: logs-system.security-1.67.0\n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: windows.sysmon_operational\n    - add_fields:\n        target: event\n        fields:\n          dataset: windows.sysmon_operational\n          module: windows\n          imported: true\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-windows.sysmon_operational-2.5.0\n- if:\n    equals:\n      winlog.channel: 'Application'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: system.application\n    - add_fields:\n        target: event\n        fields:\n          dataset: system.application\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-system.application-1.67.0\n- if:\n    equals:\n      winlog.channel: 'System'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: system.system\n    - add_fields:\n        target: event\n        fields:\n          dataset: system.system\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-system.system-1.67.0\n \n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: windows.powershell_operational\n    - add_fields:\n        target: event\n        fields:\n          dataset: windows.powershell_operational\n          module: windows\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-windows.powershell_operational-2.5.0\n- add_fields:\n    target: data_stream\n    fields:\n      dataset: import",
      "processors": "- dissect:\n    tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n    field: \"log.file.path\"\n    target_prefix: \"\"\n- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n- drop_fields:\n    fields: [\"host\"]\n    ignore_missing: true\n- add_fields:\n    target: data_stream\n    fields:\n      type: logs\n      dataset: system.security\n- add_fields:\n    target: event\n    fields:\n      dataset: system.security\n      module: system\n      imported: true\n- add_fields:\n    target: \"@metadata\"\n    fields:\n      pipeline: logs-system.security-2.6.1\n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: windows.sysmon_operational\n    - add_fields:\n        target: event\n        fields:\n          dataset: windows.sysmon_operational\n          module: windows\n          imported: true\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n    equals:\n      winlog.channel: 'Application'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: system.application\n    - add_fields:\n        target: event\n        fields:\n          dataset: system.application\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-system.application-2.6.1\n- if:\n    equals:\n      winlog.channel: 'System'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: system.system\n    - add_fields:\n        target: event\n        fields:\n          dataset: system.system\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-system.system-2.6.1\n \n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n  then: \n    - add_fields:\n        target: data_stream\n        fields:\n          dataset: windows.powershell_operational\n    - add_fields:\n        target: event\n        fields:\n          dataset: windows.powershell_operational\n          module: windows\n    - add_fields:\n        target: \"@metadata\"\n        fields:\n          pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n    target: data_stream\n    fields:\n      dataset: import",
      "tags": [
        "import"
      ]
@@ -11,7 +11,7 @@
  "tcp-tcp": {
    "enabled": true,
    "streams": {
      "tcp.generic": {
      "tcp.tcp": {
        "enabled": true,
        "vars": {
          "listen_address": "0.0.0.0",

@@ -23,7 +23,8 @@
            "syslog"
          ],
          "syslog_options": "field: message\n#format: auto\n#timezone: Local",
          "ssl": ""
          "ssl": "",
          "custom": ""
        }
      }
    }
@@ -4,6 +4,7 @@

{% import_json '/opt/so/state/esfleet_package_components.json' as ADDON_PACKAGE_COMPONENTS %}
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}

{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}

@@ -14,6 +15,7 @@
  'awsfirehose.logs': 'awsfirehose',
  'awsfirehose.metrics': 'aws.cloudwatch',
  'cribl.logs': 'cribl',
  'cribl.metrics': 'cribl',
  'sentinel_one_cloud_funnel.logins': 'sentinel_one_cloud_funnel.login',
  'azure_application_insights.app_insights': 'azure.app_insights',
  'azure_application_insights.app_state': 'azure.app_state',

@@ -45,7 +47,10 @@
  'synthetics.browser_screenshot': 'synthetics-browser.screenshot',
  'synthetics.http': 'synthetics-http',
  'synthetics.icmp': 'synthetics-icmp',
  'synthetics.tcp': 'synthetics-tcp'
  'synthetics.tcp': 'synthetics-tcp',
  'swimlane.swimlane_api': 'swimlane.api',
  'swimlane.tenant_api': 'swimlane.tenant',
  'swimlane.turbine_api': 'turbine.api'
} %}

{% for pkg in ADDON_PACKAGE_COMPONENTS %}
@@ -62,70 +67,90 @@
{% else %}
{% set integration_type = "" %}
{% endif %}
{% set component_name = pkg.name ~ "." ~ pattern.title %}
{# fix weirdly named components #}
{% if component_name in WEIRD_INTEGRATIONS %}
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
{% endif %}
{% set component_name = pkg.name ~ "." ~ pattern.title %}
{% set index_pattern = pattern.name %}

{# fix weirdly named components #}
{% if component_name in WEIRD_INTEGRATIONS %}
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
{% endif %}

{# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed
   eg. having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #}
{% set custom_component_name = component_name %}

{# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #}
{% set generic_integration_type = integration_type %}

{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
{% set component_name_x = component_name.replace(".","_x_") %}
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
{% set integration_key = "so-" ~ integration_type ~ component_name_x %}

{# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #}
{% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %}
{# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #}
{% set index_pattern = integration_type ~ component_name ~ "-*" %}
{# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #}
{% set component_name = "filestream.generic" %}
{% set generic_integration_type = "logs-" %}
{% endif %}

{# Default integration settings #}
{% set integration_defaults = {
  "index_sorting": false,
  "index_template": {
    "composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
    "data_stream": {
      "allow_custom_routing": false,
      "hidden": false
    },
    "ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
    "index_patterns": [pattern.name],
    "priority": 501,
    "template": {
      "settings": {
        "index": {
          "lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
          "number_of_replicas": 0
        }
      }
    }
  },
  "policy": {
    "phases": {
      "cold": {
        "actions": {
          "set_priority": {"priority": 0}
        },
        "min_age": "60d"
  "index_sorting": false,
  "index_template": {
    "composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
    "data_stream": {
      "allow_custom_routing": false,
      "hidden": false
    },
    "ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"],
    "index_patterns": [index_pattern],
    "priority": 501,
    "template": {
      "settings": {
        "index": {
          "lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"},
          "number_of_replicas": 0
        }
      }
    }
  },
  "policy": {
    "phases": {
      "cold": {
        "actions": {
          "set_priority": {"priority": 0}
        },
        "min_age": "60d"
      },
      "delete": {
        "actions": {
          "delete": {}
        },
        "min_age": "365d"
      },
      "hot": {
        "actions": {
          "rollover": {
            "max_age": "30d",
            "max_primary_shard_size": "50gb"
          },
          "set_priority": {"priority": 100}
        },
      "delete": {
        "actions": {
          "delete": {}
        },
        "min_age": "365d"
      },
      "hot": {
        "actions": {
          "rollover": {
            "max_age": "30d",
            "max_primary_shard_size": "50gb"
          },
          "set_priority": {"priority": 100}
        },
        "min_age": "0ms"
      },
      "warm": {
        "actions": {
          "set_priority": {"priority": 50}
        },
        "min_age": "30d"
      }
    }
  }
} %}
        "min_age": "0ms"
      },
      "warm": {
        "actions": {
          "set_priority": {"priority": 50}
        },
        "min_age": "30d"
      }
    }
  }
} %}

{% do ADDON_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
{% endfor %}
{% endif %}
@@ -23,6 +23,13 @@ fi
# Define a banner to separate sections
banner="========================================================================="

fleet_api() {
  local QUERYPATH=$1
  shift

  curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
}
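(The new fleet_api wrapper centralizes the shared curl options — config-file auth, redirect following, retries, and --fail — so the helpers below only supply the API path and any extra arguments. A usage sketch mirroring calls made later in this file:)

# Sketch: GET and POST through the shared wrapper
if output=$(fleet_api "agent_policies"); then
  echo "$output" | jq -r '.items[].id'
fi
fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'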
elastic_fleet_integration_check() {

  AGENT_POLICY=$1

@@ -39,7 +46,9 @@ elastic_fleet_integration_create() {

  JSON_STRING=$1

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -d "$JSON_STRING"; then
    return 1
  fi
}

@@ -56,7 +65,10 @@ elastic_fleet_integration_remove() {
    '{"packagePolicyIds":[$INTEGRATIONID]}'
  )

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "package_policies/delete" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    echo "Error: Unable to delete '$NAME' from '$AGENT_POLICY'"
    return 1
  fi
}

elastic_fleet_integration_update() {

@@ -65,7 +77,9 @@ elastic_fleet_integration_update() {

  JSON_STRING=$2

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPUT -d "$JSON_STRING"; then
    return 1
  fi
}

elastic_fleet_integration_policy_upgrade() {

@@ -77,101 +91,116 @@ elastic_fleet_integration_policy_upgrade() {
    '{"packagePolicyIds":[$INTEGRATIONID]}'
  )

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "package_policies/upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    return 1
  fi
}

elastic_fleet_package_version_check() {
  PACKAGE=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'

  if output=$(fleet_api "epm/packages/$PACKAGE"); then
    echo "$output" | jq -r '.item.version'
  else
    echo "Error: Failed to get current package version for '$PACKAGE'"
    return 1
  fi
}

elastic_fleet_package_latest_version_check() {
  PACKAGE=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.latestVersion'
  if output=$(fleet_api "epm/packages/$PACKAGE"); then
    if version=$(jq -e -r '.item.latestVersion' <<< $output); then
      echo "$version"
    fi
  else
    echo "Error: Failed to get latest version for '$PACKAGE'"
    return 1
  fi
}
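(Worth noting: jq -e sets its exit status from the result — nonzero when the output is null or false — which is what lets the version check above distinguish a real version string from a missing field. A quick illustration with hypothetical input:)

echo '{"item":{}}' | jq -e -r '.item.latestVersion'                         # prints "null", exits 1
echo '{"item":{"latestVersion":"1.2.3"}}' | jq -e -r '.item.latestVersion'  # prints "1.2.3", exits 0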
elastic_fleet_package_install() {
  PKG=$1
  VERSION=$2
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION"
  if ! fleet_api "epm/packages/$PKG/$VERSION" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}'; then
    return 1
  fi
}

elastic_fleet_bulk_package_install() {
  BULK_PKG_LIST=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$1 "localhost:5601/api/fleet/epm/packages/_bulk"
}

elastic_fleet_package_is_installed() {
  PACKAGE=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
}

elastic_fleet_installed_packages() {
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=500"
}

elastic_fleet_agent_policy_ids() {
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].id
  if [ $? -ne 0 ]; then
    echo "Error: Failed to retrieve agent policies."
    exit 1
  if ! fleet_api "epm/packages/_bulk" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$BULK_PKG_LIST; then
    return 1
  fi
}

elastic_fleet_agent_policy_names() {
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].name
  if [ $? -ne 0 ]; then
elastic_fleet_installed_packages() {
  if ! fleet_api "epm/packages/installed?perPage=500"; then
    return 1
  fi
}

elastic_fleet_agent_policy_ids() {
  if output=$(fleet_api "agent_policies"); then
    echo "$output" | jq -r .items[].id
  else
    echo "Error: Failed to retrieve agent policies."
    exit 1
    return 1
  fi
}

elastic_fleet_integration_policy_names() {
  AGENT_POLICY=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r .item.package_policies[].name
  if [ $? -ne 0 ]; then
  if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
    echo "$output" | jq -r .item.package_policies[].name
  else
    echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
    exit 1
    return 1
  fi
}

elastic_fleet_integration_policy_package_name() {
  AGENT_POLICY=$1
  INTEGRATION=$2
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
  if [ $? -ne 0 ]; then
  if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
    echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
  else
    echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
    exit 1
    return 1
  fi
}

elastic_fleet_integration_policy_package_version() {
  AGENT_POLICY=$1
  INTEGRATION=$2
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version'
  if [ $? -ne 0 ]; then
    echo "Error: Failed to retrieve package version for '$INTEGRATION' in '$AGENT_POLICY'."
    exit 1

  if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
    if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< "$output"); then
      echo "$version"
    fi
  else
    echo "Error: Failed to retrieve integration version for '$INTEGRATION' in policy '$AGENT_POLICY'"
    return 1
  fi
}

elastic_fleet_integration_id() {
  AGENT_POLICY=$1
  INTEGRATION=$2
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
  if [ $? -ne 0 ]; then
  if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
    echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
  else
    echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
    exit 1
    return 1
  fi
}

elastic_fleet_integration_policy_dryrun_upgrade() {
  INTEGRATION_ID=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -L -X POST "localhost:5601/api/fleet/package_policies/upgrade/dryrun" -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"
  if [ $? -ne 0 ]; then
  if ! fleet_api "package_policies/upgrade/dryrun" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -XPOST -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"; then
    echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
    exit 1
    return 1
  fi
}

@@ -180,25 +209,18 @@ elastic_fleet_policy_create() {
  NAME=$1
  DESC=$2
  FLEETSERVER=$3
  TIMEOUT=$4
  TIMEOUT=$4

  JSON_STRING=$( jq -n \
  --arg NAME "$NAME" \
  --arg DESC "$DESC" \
  --arg TIMEOUT $TIMEOUT \
  --arg FLEETSERVER "$FLEETSERVER" \
  '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
  )
    --arg NAME "$NAME" \
    --arg DESC "$DESC" \
    --arg TIMEOUT $TIMEOUT \
    --arg FLEETSERVER "$FLEETSERVER" \
    '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
  )
  # Create Fleet Policy
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    return 1
  fi

}

elastic_fleet_policy_update() {

  POLICYID=$1
  JSON_STRING=$2

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
@@ -8,6 +8,7 @@

. /usr/sbin/so-elastic-fleet-common

ERROR=false
# Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do

@@ -15,9 +16,20 @@ do
  elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
  if [ -n "$INTEGRATION_ID" ]; then
    printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
    elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
    if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
      echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
      ERROR=true
      continue
    fi
  else
    printf "\n\nIntegration does not exist - Creating integration\n"
    elastic_fleet_integration_create "@$INTEGRATION"
    if ! elastic_fleet_integration_create "@$INTEGRATION"; then
      echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
      ERROR=true
      continue
    fi
  fi
done
if [[ "$ERROR" == "true" ]]; then
  exit 1
fi
@@ -25,5 +25,9 @@ for POLICYNAME in $POLICY; do
    .name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)

  # Now update the integration policy using the modified JSON
  elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
  if ! elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"; then
    # exit 1 on failure to update fleet integration policies, let salt handle retries
    echo "Failed to update $POLICYNAME.."
    exit 1
  fi
done
@@ -13,11 +13,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
  /usr/sbin/so-elastic-fleet-package-upgrade

  # Second, update Fleet Server policies
  /sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
  /usr/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server

  # Third, configure Elastic Defend Integration separately
  /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend

  # Initial Endpoints
  for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
  do

@@ -25,10 +24,18 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
    elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
    if [ -n "$INTEGRATION_ID" ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      elastic_fleet_integration_create "@$INTEGRATION"
      if ! elastic_fleet_integration_create "@$INTEGRATION"; then
        echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    fi
  done

@@ -39,10 +46,18 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
    elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
    if [ -n "$INTEGRATION_ID" ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      elastic_fleet_integration_create "@$INTEGRATION"
      if ! elastic_fleet_integration_create "@$INTEGRATION"; then
        echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    fi
  done
  if [[ "$RETURN_CODE" != "1" ]]; then

@@ -56,11 +71,19 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
    elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
    if [ -n "$INTEGRATION_ID" ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      if [ "$NAME" != "elasticsearch-logs" ]; then
        elastic_fleet_integration_create "@$INTEGRATION"
        if ! elastic_fleet_integration_create "@$INTEGRATION"; then
          echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
          RETURN_CODE=1
          continue
        fi
      fi
    fi
  done

@@ -77,11 +100,19 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
      elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
      if [ -n "$INTEGRATION_ID" ]; then
        printf "\n\nIntegration $NAME exists - Updating integration\n"
        elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
        if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
          echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
          RETURN_CODE=1
          continue
        fi
      else
        printf "\n\nIntegration does not exist - Creating integration\n"
        if [ "$NAME" != "elasticsearch-logs" ]; then
          elastic_fleet_integration_create "@$INTEGRATION"
          if ! elastic_fleet_integration_create "@$INTEGRATION"; then
            echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
            RETURN_CODE=1
            continue
          fi
        fi
      fi
    fi
@@ -24,23 +24,39 @@ fi

default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})

ERROR=false
for AGENT_POLICY in $agent_policies; do
  integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
  if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
    # this script upgrades default integration packages, exit 1 and let salt handle retrying
    exit 1
  fi
  for INTEGRATION in $integrations; do
    if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
      # Get package name so we know what package to look for when checking the current and latest available version
      PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
      if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
        exit 1
      fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
      if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
{%- endif %}
        # Get currently installed version of package
        PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")

        # Get latest available version of package
        AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")
        attempt=0
        max_attempts=3
        while [ $attempt -lt $max_attempts ]; do
          if PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION") && AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME"); then
            break
          fi
          attempt=$((attempt + 1))
        done
        if [ $attempt -eq $max_attempts ]; then
          echo "Error: Failed getting $PACKAGE_VERSION or $AVAILABLE_VERSION"
          exit 1
        fi

        # Get integration ID
        INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
        if ! INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION"); then
          exit 1
        fi

        if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
          # Dry run of the upgrade

@@ -48,20 +64,23 @@ for AGENT_POLICY in $agent_policies; do
          echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
          echo "Upgrading $INTEGRATION..."
          echo "Starting dry run..."
          DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
          if ! DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then
            exit 1
          fi
          DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)

          # If no errors with dry run, proceed with actual upgrade
          if [[ "$DRYRUN_ERRORS" == "false" ]]; then
            echo "No errors detected. Proceeding with upgrade..."
            elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
            if [ $? -ne 0 ]; then
            if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
              echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
              exit 1
              ERROR=true
              continue
            fi
          else
            echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
            exit 1
            ERROR=true
            continue
          fi
        fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}

@@ -70,4 +89,7 @@ for AGENT_POLICY in $agent_policies; do
    fi
  done
done
if [[ "$ERROR" == "true" ]]; then
  exit 1
fi
echo
@@ -19,6 +19,7 @@ BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json

PENDING_UPDATE=false

@@ -61,9 +62,17 @@ default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
in_use_integrations=()

for AGENT_POLICY in $agent_policies; do
  integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")

  if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
    # skip the agent policy if we can't get required info, let salt retry. Integrations loaded by this script are non-default integrations.
    echo "Skipping $AGENT_POLICY.. "
    continue
  fi
  for INTEGRATION in $integrations; do
    PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
    if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
      echo "Not adding $INTEGRATION, couldn't get package name"
      continue
    fi
    # non-default integrations that are in-use in any policy
    if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
      in_use_integrations+=("$PACKAGE_NAME")

@@ -147,14 +156,38 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
  done <<< "$(jq -c '.packages[]' "$INSTALLED_PACKAGE_LIST")"

  if [ "$PENDING_UPDATE" = true ]; then
    # Run bulk install of packages
    elastic_fleet_bulk_package_install $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_OUTPUT
    # Run chunked install of packages
    echo "" > $BULK_INSTALL_OUTPUT
    pkg_group=1
    pkg_filename="${BULK_INSTALL_PACKAGE_LIST%.json}"

    jq -c '.packages | _nwise(25)' $BULK_INSTALL_PACKAGE_LIST | while read -r line; do
      echo "$line" | jq '{ "packages": . }' > "${pkg_filename}_${pkg_group}.json"
      pkg_group=$((pkg_group + 1))
    done

    for file in "${pkg_filename}_"*.json; do
      [ -e "$file" ] || continue
      if ! elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT; then
        # integrations loaded by this script are non-essential and shouldn't cause exit; skip them for now, the next highstate run can retry
        echo "Failed to complete a chunk of bulk package installs -- $file "
        continue
      fi
    done
    # cleanup any temp files for chunked package install
    rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
  else
    echo "Elastic integrations don't appear to need installation/updating..."
  fi
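(The switch from one _bulk call to 25-package chunks leans on jq's _nwise builtin, which splits an array into fixed-size slices — a quick illustration with toy input, not part of the commit:)

# Sketch: how _nwise chunks the package array
printf '{"packages":[1,2,3,4,5]}' | jq -c '.packages | _nwise(2)'
# -> [1,2]
#    [3,4]
#    [5]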
  # Write out file for generating index/component/ilm templates
  latest_installed_package_list=$(elastic_fleet_installed_packages)
  echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
  if latest_installed_package_list=$(elastic_fleet_installed_packages); then
    echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
  fi
  if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
    # Refresh installed component template list
    latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
    echo $latest_component_templates_list > $COMPONENT_TEMPLATES
  fi

else
  # This is the installation of add-on integrations and upgrade of existing integrations. Exiting without error, next highstate will attempt to re-run.
@@ -15,22 +15,49 @@ if ! is_manager_node; then
fi

function update_logstash_outputs() {
  # Generate updated JSON payload
  JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
  if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
    SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
    if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SECRETS "$SECRETS" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
    else
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
    fi
  fi

  # Update Logstash Outputs
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
function update_kafka_outputs() {
  # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
  SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')

  JSON_STRING=$(jq -n \
    --arg UPDATEDLIST "$NEW_LIST_JSON" \
    --argjson SSL_CONFIG "$SSL_CONFIG" \
    '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
  # Update Kafka outputs
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
  if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
    SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
    if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
      # Update policy when fleet has secrets enabled
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        --argjson SECRETS "$SECRETS" \
        '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
    else
      # Update policy when fleet has secrets disabled or policy hasn't been force updated
      JSON_STRING=$(jq -n \
        --arg UPDATEDLIST "$NEW_LIST_JSON" \
        --argjson SSL_CONFIG "$SSL_CONFIG" \
        '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
    fi
    # Update Kafka outputs
    curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
  else
    printf "Failed to get current Kafka output policy..."
    exit 1
  fi
}

{% if GLOBALS.pipeline == "KAFKA" %}
@@ -10,8 +10,16 @@
|
||||
|
||||
{%- for PACKAGE in SUPPORTED_PACKAGES %}
|
||||
echo "Setting up {{ PACKAGE }} package..."
|
||||
VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}")
|
||||
elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
|
||||
if VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}"); then
|
||||
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then
|
||||
# packages loaded by this script should never fail to install and REQUIRED before an installation of SO can be considered successful
|
||||
echo -e "\nERROR: Failed to install default integration package -- $PACKAGE $VERSION"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
|
||||
exit 1
|
||||
fi
|
||||
echo
|
||||
{%- endfor %}
|
||||
echo
|
||||
|
||||
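The `elastic_fleet_package_*` helpers are defined outside this diff; as a rough mental model (an assumption, not confirmed here), installing a pinned package version maps onto Kibana's Fleet EPM endpoint:

    # Hypothetical stand-in for: elastic_fleet_package_install "$PACKAGE" "$VERSION"
    PACKAGE="endpoint" VERSION="8.18.0"   # illustrative values
    curl -K /opt/so/conf/elasticsearch/curl.config -L --fail \
        -XPOST "localhost:5601/api/fleet/epm/packages/${PACKAGE}/${VERSION}" \
        -H 'kbn-xsrf: true' -H 'Content-Type: application/json'
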
@@ -10,8 +10,15 @@

{%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Upgrading {{ PACKAGE }} package..."
VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}")
elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
if VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}"); then
    if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then
        # exit 1 on failure to upgrade a default package; allow Salt to handle retries
        echo -e "\nERROR: Failed to upgrade $PACKAGE to version: $VERSION"
        exit 1
    fi
else
    echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
fi
echo
{%- endfor %}
echo

@@ -23,18 +23,17 @@ if [[ "$RETURN_CODE" != "0" ]]; then
    exit 1
fi

ALIASES=".fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
for ALIAS in ${ALIASES}
do
ALIASES=(.fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest)
for ALIAS in "${ALIASES[@]}"; do
    # Get all concrete indices from the alias
    INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')

    # Delete all resolved indices
    for INDX in ${INDXS}
    do
    if INDXS_RAW=$(curl -sK /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then
        INDXS=$(echo "$INDXS_RAW" | jq -r '.aliases[].indices[]')
        # Delete all resolved indices
        for INDX in ${INDXS}; do
            status "Deleting $INDX"
            curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
        done
        done
    fi
done
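
The jq path used here follows the shape of the `_resolve/index` response, which groups matches under `indices`, `aliases`, and `data_streams`. A sketch (the index name in the sample output is illustrative):

    curl -sK /opt/so/conf/kibana/curl.config -k -L \
        "https://localhost:9200/_resolve/index/.fleet-agents" | jq -r '.aliases[].indices[]'
    # e.g. ".fleet-agents-7" -- the concrete backing index behind the alias
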

# Restarting Kibana...
@@ -51,22 +50,61 @@ if [[ "$RETURN_CODE" != "0" ]]; then
fi

printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
if ESTOKEN_RAW=$(fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
    ESTOKEN=$(echo "$ESTOKEN_RAW" | jq -r .value)
else
    echo -e "\nFailed to create ES token..."
    exit 1
fi

### Create Outputs, Fleet Policy and Fleet URLs ###
# Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n"
ESCACRT=$(openssl x509 -in $INTCA)
JSON_STRING=$( jq -n \
    --arg ESCACRT "$ESCACRT" \
    '{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
ESCACRT=$(openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
JSON_STRING=$(jq -n \
    --arg ESCACRT "$ESCACRT" \
    '{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}')

if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    echo -e "\nFailed to create the so-manager_elasticsearch policy..."
    exit 1
fi
printf "\n\n"
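
The reworked `ESCACRT` line now produces the `ca_trusted_fingerprint` Elastic Agent expects: the SHA-256 of the CA certificate's DER encoding, upper-case hex without separators. One way to sanity-check the value, since openssl computes the same digest itself:

    INTCA=/etc/pki/tls/certs/intca.crt   # illustrative CA path from this changeset
    openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]'
    # Should match openssl's own fingerprint, minus the colons:
    openssl x509 -in "$INTCA" -noout -fingerprint -sha256 | cut -d= -f2 | tr -d ':'
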

# so-manager_elasticsearch should exist and be disabled. Now update it before checking that it's the only default policy
MANAGER_OUTPUT_ENABLED=$(echo "$JSON_STRING" | jq 'del(.id) | .is_default = true | .is_default_monitoring = true')
if ! curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$MANAGER_OUTPUT_ENABLED"; then
    echo -e "\nFailed to update so-manager_elasticsearch"
    exit 1
fi

# At this point there should only be two policies: fleet-default-output & so-manager_elasticsearch
status "Verifying so-manager_elasticsearch policy is configured as the current default"

# Grab the fleet-default-output policy instead of so-manager_elasticsearch, because a weird state can exist where both fleet-default-output & so-manager_elasticsearch are set as the active default output for logs/metrics, resulting in logs not ingesting on import/eval nodes
if DEFAULTPOLICY=$(fleet_api "outputs/fleet-default-output"); then
    fleet_default=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default')
    fleet_default_monitoring=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default_monitoring')
    # Check that fleet-default-output isn't configured as a default for anything (both variables return false)
    if [[ $fleet_default == "false" ]] && [[ $fleet_default_monitoring == "false" ]]; then
        echo -e "\nso-manager_elasticsearch is configured as the current default policy..."
    else
        echo -e "\nVerification of so-manager_elasticsearch policy failed... The default 'fleet-default-output' output is still active..."
        exit 1
    fi
else
    # fleet-default-output is created automatically by Fleet when it starts, so it should always exist on any installation type
    echo -e "\nDefault fleet-default-output policy doesn't exist...\n"
    exit 1
fi

# Create the Manager Fleet Server Host Agent Policy
# This has to be done while the Elasticsearch Output is set to the default Output
printf "Create Manager Fleet Server Policy...\n"
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
if ! elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"; then
    echo -e "\nFailed to create Manager fleet server policy..."
    exit 1
fi

# Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
@@ -74,7 +112,10 @@ UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)

# Add the Fleet Server Integration to the new Fleet Policy
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
if ! elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"; then
    echo -e "\nFailed to create Fleet server integration for Manager..."
    exit 1
fi

# Now we can create the Logstash Output and set it to be the default Output
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
@@ -86,9 +127,12 @@ JSON_STRING=$( jq -n \
    --arg LOGSTASHCRT "$LOGSTASHCRT" \
    --arg LOGSTASHKEY "$LOGSTASHKEY" \
    --arg LOGSTASHCA "$LOGSTASHCA" \
    '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
    '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    echo -e "\nFailed to create logstash fleet output"
    exit 1
fi
printf "\n\n"
{%- endif %}

@@ -106,7 +150,10 @@ else
fi

## This array replaces whatever URLs are currently configured
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/fleet_server_hosts" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "fleet_server_hosts" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    echo -e "\nFailed to add manager fleet URL"
    exit 1
fi
printf "\n\n"

### Create Policies & Associated Integration Configuration ###
@@ -117,13 +164,22 @@ printf "\n\n"
/usr/sbin/so-elasticsearch-templates-load

# Initial Endpoints Policy
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
if ! elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"; then
    echo -e "\nFailed to create endpoints-initial policy..."
    exit 1
fi

# Grid Nodes - General Policy
elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"
if ! elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"; then
    echo -e "\nFailed to create so-grid-nodes_general policy..."
    exit 1
fi

# Grid Nodes - Heavy Node Policy
elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"
if ! elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"; then
    echo -e "\nFailed to create so-grid-nodes_heavy policy..."
    exit 1
fi

# Load Integrations for default policies
so-elastic-fleet-integration-policy-load
@@ -135,14 +191,34 @@ JSON_STRING=$( jq -n \
    '{"name":$NAME,"host":$URL,"is_default":true}'
)

curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "agent_download_sources" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    echo -e "\nFailed to update Elastic Agent artifact URL"
    exit 1
fi

### Finalization ###

# Query for Enrollment Tokens for default policies
ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
GRIDNODESENROLLMENTOKENGENERAL=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
GRIDNODESENROLLMENTOKENHEAVY=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
if ENDPOINTSENROLLMENTOKEN_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
    ENDPOINTSENROLLMENTOKEN=$(echo "$ENDPOINTSENROLLMENTOKEN_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
else
    echo -e "\nFailed to query for Endpoints enrollment token"
    exit 1
fi

if GRIDNODESENROLLMENTOKENGENERAL_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
    GRIDNODESENROLLMENTOKENGENERAL=$(echo "$GRIDNODESENROLLMENTOKENGENERAL_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
else
    echo -e "\nFailed to query for Grid nodes - General enrollment token"
    exit 1
fi

if GRIDNODESENROLLMENTOKENHEAVY_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
    GRIDNODESENROLLMENTOKENHEAVY=$(echo "$GRIDNODESENROLLMENTOKENHEAVY_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
else
    echo -e "\nFailed to query for Grid nodes - Heavy enrollment token"
    exit 1
fi
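
All three lookups hit the same `enrollment_api_keys` endpoint, so one response can be inspected for every policy at once; a sketch using the same `fleet_api` helper and jq fields as above:

    # List policy_id -> api_key pairs from a single query:
    fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
        | jq -r '.list[] | "\(.policy_id)  \(.api_key)"'
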

# Store needed data in minion pillar
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls

@@ -5,46 +5,78 @@
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-managerhype'] %}

. /usr/sbin/so-common

force=false
while [[ $# -gt 0 ]]; do
    case $1 in
        -f|--force)
            force=true
            shift
            ;;
        *)
            echo "Unknown option $1"
            echo "Usage: $0 [-f|--force]"
            exit 1
            ;;
    esac
done

# Check to make sure that the Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?

if [[ "$RETURN_CODE" != "0" ]]; then
    printf "Kibana API not accessible, can't set up Elastic Fleet output policy for Kafka..."
    exit 1
    echo -e "\nKibana API not accessible, can't set up Elastic Fleet output policy for Kafka...\n"
    exit 1
fi

output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"

if ! echo "$output" | grep -q "so-manager_kafka"; then
    KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
    KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
    KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
    KAFKA_OUTPUT_VERSION="2.6.0"
if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
    # Create a new output policy for Kafka. Default is disabled: 'is_default: false & is_default_monitoring: false'
    JSON_STRING=$( jq -n \
        --arg KAFKACRT "$KAFKACRT" \
        --arg KAFKAKEY "$KAFKAKEY" \
        --arg KAFKACA "$KAFKACA" \
        --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
        --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
        '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 10 }, "topics":[{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
    )
    curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
    refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)

    if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
        echo -e "\nFailed to set up Elastic Fleet output policy for Kafka...\n"
        --arg KAFKACRT "$KAFKACRT" \
        --arg KAFKAKEY "$KAFKAKEY" \
        --arg KAFKACA "$KAFKACA" \
        --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
        --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
        '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
    )
    if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
        echo -e "\nFailed to set up Elastic Fleet output policy for Kafka...\n"
        exit 1
    else
        echo -e "\nSuccessfully set up Elastic Fleet output policy for Kafka...\n"
        exit 0
    fi
elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null) && [[ "$force" == "true" ]]; then
    # Force an update to the Kafka policy. Keep the current value of the Kafka output policy (enabled/disabled).
    ENABLED_DISABLED=$(echo "$kafka_output" | jq -e .item.is_default)
    HOSTS=$(echo "$kafka_output" | jq -r '.item.hosts')
    JSON_STRING=$( jq -n \
        --arg KAFKACRT "$KAFKACRT" \
        --arg KAFKAKEY "$KAFKAKEY" \
        --arg KAFKACA "$KAFKACA" \
        --arg ENABLED_DISABLED "$ENABLED_DISABLED"\
        --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
        --argjson HOSTS "$HOSTS" \
        '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
    )
    if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
        echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
        exit 1
    elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
        echo -e "\nSuccessfully set up Elastic Fleet output policy for Kafka...\n"
    else
        echo -e "\nForced update to Elastic Fleet output policy for Kafka...\n"
    fi

elif echo "$output" | grep -q "so-manager_kafka"; then
else
    echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi
{% else %}

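With the argument parsing above, Salt can keep calling the script with no flags, while an operator can force a policy re-push by hand; for example (script name illustrative, not confirmed by this diff):

    # Re-push the Kafka output policy, e.g. after rotating
    # /etc/pki/elasticfleet-kafka.crt and .key:
    so-elastic-fleet-kafka-output --force
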
@@ -28,7 +28,7 @@
{% endfor %}
{% endfor %}

{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
{% if grains.id.split('_') | last in ['manager','managerhype','managersearch','standalone'] %}
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}

@@ -1,6 +1,6 @@
elasticsearch:
  enabled: false
  version: 8.17.3
  version: 8.18.8
  index_clean: true
  config:
    action:
@@ -284,6 +284,86 @@ elasticsearch:
          hot:
            actions: {}
            min_age: 0ms
    so-assistant-chat:
      index_sorting: false
      index_template:
        composed_of:
          - assistant-chat-mappings
          - assistant-chat-settings
        data_stream:
          allow_custom_routing: false
          hidden: false
        ignore_missing_component_templates: []
        index_patterns:
          - so-assistant-chat*
        priority: 501
        template:
          mappings:
            date_detection: false
            dynamic_templates:
              - strings_as_keyword:
                  mapping:
                    ignore_above: 1024
                    type: keyword
                  match_mapping_type: string
          settings:
            index:
              lifecycle:
                name: so-assistant-chat-logs
              mapping:
                total_fields:
                  limit: 1500
              number_of_replicas: 0
              number_of_shards: 1
              refresh_interval: 1s
              sort:
                field: '@timestamp'
                order: desc
      policy:
        phases:
          hot:
            actions: {}
            min_age: 0ms
    so-assistant-session:
      index_sorting: false
      index_template:
        composed_of:
          - assistant-session-mappings
          - assistant-session-settings
        data_stream:
          allow_custom_routing: false
          hidden: false
        ignore_missing_component_templates: []
        index_patterns:
          - so-assistant-session*
        priority: 501
        template:
          mappings:
            date_detection: false
            dynamic_templates:
              - strings_as_keyword:
                  mapping:
                    ignore_above: 1024
                    type: keyword
                  match_mapping_type: string
          settings:
            index:
              lifecycle:
                name: so-assistant-session-logs
              mapping:
                total_fields:
                  limit: 1500
              number_of_replicas: 0
              number_of_shards: 1
              refresh_interval: 1s
              sort:
                field: '@timestamp'
                order: desc
      policy:
        phases:
          hot:
            actions: {}
            min_age: 0ms
    so-endgame:
      index_sorting: false
      index_template:
@@ -567,6 +647,7 @@ elasticsearch:
          - common-settings
          - common-dynamic-mappings
          - winlog-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -1242,6 +1323,68 @@ elasticsearch:
            set_priority:
              priority: 50
            min_age: 30d
    so-elastic-agent-monitor:
      index_sorting: false
      index_template:
        composed_of:
          - event-mappings
          - so-elastic-agent-monitor
          - so-fleet_integrations.ip_mappings-1
          - so-fleet_globals-1
          - so-fleet_agent_id_verification-1
        data_stream:
          allow_custom_routing: false
          hidden: false
        index_patterns:
          - logs-agentmonitor-*
        priority: 501
        template:
          mappings:
            _meta:
              managed: true
              managed_by: security_onion
              package:
                name: elastic_agent
          settings:
            index:
              lifecycle:
                name: so-elastic-agent-monitor-logs
              mapping:
                total_fields:
                  limit: 5000
              number_of_replicas: 0
              sort:
                field: '@timestamp'
                order: desc
      policy:
        _meta:
          managed: true
          managed_by: security_onion
          package:
            name: elastic_agent
        phases:
          cold:
            actions:
              set_priority:
                priority: 0
            min_age: 60d
          delete:
            actions:
              delete: {}
            min_age: 365d
          hot:
            actions:
              rollover:
                max_age: 30d
                max_primary_shard_size: 50gb
              set_priority:
                priority: 100
            min_age: 0ms
          warm:
            actions:
              set_priority:
                priority: 50
            min_age: 30d
    so-logs-elastic_agent_x_apm_server:
      index_sorting: false
      index_template:
@@ -1848,6 +1991,70 @@ elasticsearch:
            set_priority:
              priority: 50
            min_age: 30d
    so-logs-elasticsearch_x_server:
      index_sorting: false
      index_template:
        composed_of:
          - logs-elasticsearch.server@package
          - logs-elasticsearch.server@custom
          - so-fleet_integrations.ip_mappings-1
          - so-fleet_globals-1
          - so-fleet_agent_id_verification-1
        data_stream:
          allow_custom_routing: false
          hidden: false
        ignore_missing_component_templates:
          - logs-elasticsearch.server@custom
        index_patterns:
          - logs-elasticsearch.server-*
        priority: 501
        template:
          mappings:
            _meta:
              managed: true
              managed_by: security_onion
              package:
                name: elastic_agent
          settings:
            index:
              lifecycle:
                name: so-logs-elasticsearch.server-logs
              mapping:
                total_fields:
                  limit: 5000
              number_of_replicas: 0
              sort:
                field: '@timestamp'
                order: desc
      policy:
        _meta:
          managed: true
          managed_by: security_onion
          package:
            name: elastic_agent
        phases:
          cold:
            actions:
              set_priority:
                priority: 0
            min_age: 60d
          delete:
            actions:
              delete: {}
            min_age: 365d
          hot:
            actions:
              rollover:
                max_age: 30d
                max_primary_shard_size: 50gb
              set_priority:
                priority: 100
            min_age: 0ms
          warm:
            actions:
              set_priority:
                priority: 50
            min_age: 30d
    so-logs-endpoint_x_actions:
      index_sorting: false
      index_template:
@@ -3874,6 +4081,7 @@ elasticsearch:
          - vulnerability-mappings
          - common-settings
          - common-dynamic-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -3987,6 +4195,7 @@ elasticsearch:
          - vulnerability-mappings
          - common-settings
          - common-dynamic-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -4028,7 +4237,7 @@ elasticsearch:
          hot:
            actions:
              rollover:
                max_age: 1d
                max_age: 30d
                max_primary_shard_size: 50gb
              set_priority:
                priority: 100
@@ -4100,6 +4309,7 @@ elasticsearch:
          - vulnerability-mappings
          - common-settings
          - common-dynamic-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -4329,6 +4539,7 @@ elasticsearch:
          - zeek-mappings
          - common-settings
          - common-dynamic-mappings
          - hash-mappings
        data_stream: {}
        ignore_missing_component_templates: []
        index_patterns:
@@ -4501,6 +4712,14 @@ elasticsearch:
        - data
        - remote_cluster_client
        - transform
    so-managerhype:
      config:
        node:
          roles:
            - master
            - data
            - remote_cluster_client
            - transform
    so-managersearch:
      config:
        node:

@@ -204,7 +204,7 @@ so-elasticsearch-roles-load:
    - docker_container: so-elasticsearch
    - file: elasticsearch_sbin_jinja

{% if grains.role in ['so-managersearch', 'so-manager'] %}
{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
{% set ap = "absent" %}
{% endif %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}

@@ -26,7 +26,7 @@
{
  "geoip": {
    "field": "destination.ip",
    "target_field": "destination_geo",
    "target_field": "destination.as",
    "database_file": "GeoLite2-ASN.mmdb",
    "ignore_missing": true,
    "ignore_failure": true,
@@ -36,13 +36,17 @@
{
  "geoip": {
    "field": "source.ip",
    "target_field": "source_geo",
    "target_field": "source.as",
    "database_file": "GeoLite2-ASN.mmdb",
    "ignore_missing": true,
    "ignore_failure": true,
    "properties": ["ip", "asn", "organization_name", "network"]
  }
},
{ "rename": { "field": "destination.as.organization_name", "target_field": "destination.as.organization.name", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "source.as.organization_name", "target_field": "source.as.organization.name", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "destination.as.asn", "target_field": "destination.as.number", "ignore_failure": true, "ignore_missing": true } },
{ "rename": { "field": "source.as.asn", "target_field": "source.as.number", "ignore_failure": true, "ignore_missing": true } },
{ "set": { "if": "ctx.event?.severity == 1", "field": "event.severity_label", "value": "low", "override": true } },
{ "set": { "if": "ctx.event?.severity == 2", "field": "event.severity_label", "value": "medium", "override": true } },
{ "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } },

22
salt/elasticsearch/files/ingest/common.ip_validation
Normal file
@@ -0,0 +1,22 @@
{
  "processors": [
    {
      "convert": {
        "field": "_ingest._value",
        "type": "ip",
        "target_field": "_ingest._temp_ip",
        "ignore_failure": true
      }
    },
    {
      "append": {
        "field": "temp._valid_ips",
        "allow_duplicates": false,
        "value": [
          "{{{_ingest._temp_ip}}}"
        ],
        "ignore_failure": true
      }
    }
  ]
}
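
A convenient way to sanity-check this pipeline is Elasticsearch's `_ingest/pipeline/_simulate` API. A sketch, assuming the pipeline has already been loaded under the name `common.ip_validation` and that `so-elasticsearch-query` forwards extra curl arguments (as its `--fail` usage elsewhere in this changeset suggests); the sample document is invented:

    # Wrap the stored pipeline in a foreach, the same way zeek.dns invokes it.
    so-elasticsearch-query _ingest/pipeline/_simulate -XPOST -d '{
      "pipeline": { "processors": [
        { "foreach": { "field": "dns.answers.name",
                       "processor": { "pipeline": { "name": "common.ip_validation" } } } }
      ] },
      "docs": [ { "_source": { "dns": { "answers": { "name": [ "10.1.1.1", "example.com" ] } } } } ]
    }'
    # Only "10.1.1.1" converts to type ip, so temp._valid_ips should come back as ["10.1.1.1"].
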
36
salt/elasticsearch/files/ingest/elasticagent.monitor
Normal file
@@ -0,0 +1,36 @@
{
  "processors": [
    {
      "set": {
        "field": "event.dataset",
        "value": "gridmetrics.agents",
        "ignore_failure": true
      }
    },
    {
      "set": {
        "field": "event.module",
        "value": "gridmetrics",
        "ignore_failure": true
      }
    },
    {
      "remove": {
        "field": [
          "host",
          "elastic_agent",
          "agent"
        ],
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "json": {
        "field": "message",
        "add_to_root": true,
        "ignore_failure": true
      }
    }
  ]
}
@@ -23,8 +23,9 @@
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "set": { "if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'", "field": "event.module", "value":"elasticsearch" }},
{"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
{"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint'","description":"Extract IPs from Elastic Agent events (host.ip) and add them to related.ip"}},
{"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and add them to related.ip"}},
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
]
}

@@ -107,61 +107,61 @@
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-firewall",
    "name": "logs-pfsense.log-1.23.1-firewall",
    "if": "ctx.event.provider == 'filterlog'"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-openvpn",
    "name": "logs-pfsense.log-1.23.1-openvpn",
    "if": "ctx.event.provider == 'openvpn'"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-ipsec",
    "name": "logs-pfsense.log-1.23.1-ipsec",
    "if": "ctx.event.provider == 'charon'"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-dhcp",
    "name": "logs-pfsense.log-1.23.1-dhcp",
    "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-unbound",
    "name": "logs-pfsense.log-1.23.1-unbound",
    "if": "ctx.event.provider == 'unbound'"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-haproxy",
    "name": "logs-pfsense.log-1.23.1-haproxy",
    "if": "ctx.event.provider == 'haproxy'"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-php-fpm",
    "name": "logs-pfsense.log-1.23.1-php-fpm",
    "if": "ctx.event.provider == 'php-fpm'"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-squid",
    "name": "logs-pfsense.log-1.23.1-squid",
    "if": "ctx.event.provider == 'squid'"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-snort",
    "name": "logs-pfsense.log-1.23.1-snort",
    "if": "ctx.event.provider == 'snort'"
  }
},
{
  "pipeline": {
    "name": "logs-pfsense.log-1.21.0-suricata",
    "name": "logs-pfsense.log-1.23.1-suricata",
    "if": "ctx.event.provider == 'suricata'"
  }
},
@@ -358,14 +358,6 @@
      "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
    }
  },
  {
    "remove": {
      "field": "event.original",
      "if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
      "ignore_failure": true,
      "ignore_missing": true
    }
  },
  {
    "pipeline": {
      "name": "global@custom",
@@ -24,6 +24,10 @@
{ "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } },
{ "rename": { "field": "message2.sensorname", "target_field": "observer.name", "ignore_missing": true } },
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4l", "target_field": "hash.ja4l", "ignore_missing" : true, "if": "ctx.message2?.ja4l != null && ctx.message2.ja4l.length() > 0" }},
{ "rename": { "field": "message2.ja4ls", "target_field": "hash.ja4ls", "ignore_missing" : true, "if": "ctx.message2?.ja4ls != null && ctx.message2.ja4ls.length() > 0" }},
{ "rename": { "field": "message2.ja4t", "target_field": "hash.ja4t", "ignore_missing" : true, "if": "ctx.message2?.ja4t != null && ctx.message2.ja4t.length() > 0" }},
{ "rename": { "field": "message2.ja4ts", "target_field": "hash.ja4ts", "ignore_missing" : true, "if": "ctx.message2?.ja4ts != null && ctx.message2.ja4ts.length() > 0" }},
{ "script": { "lang": "painless", "source": "ctx.network.bytes = (ctx.client.bytes + ctx.server.bytes)", "ignore_failure": true } },
{ "set": { "if": "ctx.connection?.state == 'S0'", "field": "connection.state_description", "value": "Connection attempt seen, no reply" } },
{ "set": { "if": "ctx.connection?.state == 'S1'", "field": "connection.state_description", "value": "Connection established, not terminated" } },

|
||||
{ "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
|
||||
{ "script": { "lang": "painless", "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", "source": "def ips = []; for (item in ctx.dns.answers.name) { if (item =~ /^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$/ || item =~ /^([a-fA-F0-9:]+:+)+[a-fA-F0-9]+$/) { ips.add(item); } } ctx.dns.resolved_ip = ips;" } },
|
||||
{ "foreach": {"field": "dns.answers.name","processor": {"pipeline": {"name": "common.ip_validation"}},"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null","ignore_failure": true}},
|
||||
{ "foreach": {"field": "temp._valid_ips","processor": {"append": {"field": "dns.resolved_ip","allow_duplicates": false,"value": "{{{_ingest._value}}}","ignore_failure": true}},"ignore_failure": true}},
|
||||
{ "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }","ignore_failure": true }},
|
||||
{ "remove": {"field": ["temp"], "ignore_missing": true ,"ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
|
||||
{ "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
|
||||
|
||||
@@ -27,6 +27,7 @@
{ "rename": { "field": "message2.resp_fuids", "target_field": "log.id.resp_fuids", "ignore_missing": true } },
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } },
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } },

@@ -27,6 +27,7 @@
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
{ "rename": { "field": "message2.stream_id", "target_field": "http2.stream_id", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } },
{ "remove": { "field": "message2.tags", "ignore_failure": true } },
{ "remove": { "field": ["host"], "ignore_failure": true } },
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },

10
salt/elasticsearch/files/ingest/zeek.ja4ssh
Normal file
@@ -0,0 +1,10 @@
{
  "description": "zeek.ja4ssh",
  "processors": [
    {"set": {"field": "event.dataset","value": "ja4ssh"}},
    {"remove": {"field": "host","ignore_missing": true,"ignore_failure": true}},
    {"json": {"field": "message","target_field": "message2","ignore_failure": true}},
    {"rename": {"field": "message2.ja4ssh", "target_field": "hash.ja4ssh", "ignore_missing": true, "if": "ctx?.message2?.ja4ssh != null && ctx.message2.ja4ssh.length() > 0" }},
    {"pipeline": {"name": "zeek.common"}}
  ]
}
@@ -23,6 +23,8 @@
{ "rename": { "field": "message2.validation_status","target_field": "ssl.validation_status", "ignore_missing": true } },
{ "rename": { "field": "message2.ja3", "target_field": "hash.ja3", "ignore_missing": true } },
{ "rename": { "field": "message2.ja3s", "target_field": "hash.ja3s", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4", "target_field": "hash.ja4", "ignore_missing": true, "if": "ctx?.message2?.ja4 != null && ctx.message2.ja4.length() > 0" } },
{ "rename": { "field": "message2.ja4s", "target_field": "hash.ja4s", "ignore_missing": true, "if": "ctx?.message2?.ja4s != null && ctx.message2.ja4s.length() > 0" } },
{ "foreach":
  {
    "if": "ctx?.tls?.client?.hash?.sha256 !=null",

@@ -42,6 +42,7 @@
{ "dot_expander": { "field": "basic_constraints.path_length", "path": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.basic_constraints.path_length", "target_field": "x509.basic_constraints.path_length", "ignore_missing": true } },
{ "rename": { "field": "message2.fingerprint", "target_field": "hash.sha256", "ignore_missing": true } },
{ "rename": { "field": "message2.ja4x", "target_field": "hash.ja4x", "ignore_missing": true, "if": "ctx?.message2?.ja4x != null && ctx.message2.ja4x.length() > 0" } },
{ "pipeline": { "name": "zeek.common_ssl" } }
]
}

@@ -20,8 +20,28 @@ appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/elasticsearch
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = *.gz
appender.rolling.strategy.action.condition.glob = *.log.gz
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling.strategy.action.condition.nested_condition.age = 7D

appender.rolling_json.type = RollingFile
appender.rolling_json.name = rolling_json
appender.rolling_json.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.json
appender.rolling_json.layout.type = ECSJsonLayout
appender.rolling_json.layout.dataset = elasticsearch.server
appender.rolling_json.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.json.gz
appender.rolling_json.policies.type = Policies
appender.rolling_json.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_json.policies.time.interval = 1
appender.rolling_json.policies.time.modulate = true
appender.rolling_json.strategy.type = DefaultRolloverStrategy
appender.rolling_json.strategy.action.type = Delete
appender.rolling_json.strategy.action.basepath = /var/log/elasticsearch
appender.rolling_json.strategy.action.condition.type = IfFileName
appender.rolling_json.strategy.action.condition.glob = *.json.gz
appender.rolling_json.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling_json.strategy.action.condition.nested_condition.age = 1D

rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_json.ref = rolling_json

@@ -392,6 +392,7 @@ elasticsearch:
    so-logs-elastic_agent_x_metricbeat: *indexSettings
    so-logs-elastic_agent_x_osquerybeat: *indexSettings
    so-logs-elastic_agent_x_packetbeat: *indexSettings
    so-logs-elasticsearch_x_server: *indexSettings
    so-metrics-endpoint_x_metadata: *indexSettings
    so-metrics-endpoint_x_metrics: *indexSettings
    so-metrics-endpoint_x_policy: *indexSettings

@@ -15,7 +15,7 @@
{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}

{# start generation of integration default index_settings #}
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') %}
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
{% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
{% if check_package_components.size > 1 %}
{% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}

69
salt/elasticsearch/templates/component/ecs/hash.json
Normal file
@@ -0,0 +1,69 @@
|
||||
{
|
||||
"template": {
|
||||
"mappings": {
|
||||
"properties": {
|
||||
"hash": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"ja3": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja3s": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"hassh": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"md5": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"sha1": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"sha256": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4l": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4ls": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4t": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4ts": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4ssh": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4h": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
},
|
||||
"ja4x": {
|
||||
"type": "keyword",
|
||||
"ignore_above": 1024
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
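
The `hash-mappings` entries added to the `composed_of` lists earlier in this changeset resolve against a component template built from this file. A hedged sketch of the registration step, assuming the standard `_component_template` API and an illustrative on-disk path for the rendered template:

    # Register the ECS hash mappings so index templates can compose them.
    so-elasticsearch-query _component_template/hash-mappings -XPUT \
        -d @/opt/so/conf/elasticsearch/templates/component/ecs/hash.json
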
@@ -0,0 +1,43 @@
{
  "template": {
    "mappings": {
      "properties": {
        "agent": {
          "type": "object",
          "properties": {
            "hostname": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "id": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "last_checkin_status": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "last_checkin": {
              "type": "date"
            },
            "name": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "offline_duration_hours": {
              "type": "integer"
            },
            "policy_id": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "status": {
              "ignore_above": 1024,
              "type": "keyword"
            }
          }
        }
      }
    }
  }
}
@@ -0,0 +1,104 @@
{
  "template": {
    "mappings": {
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "so_kind": {
          "ignore_above": 1024,
          "type": "keyword"
        },
        "so_operation": {
          "ignore_above": 1024,
          "type": "keyword"
        },
        "so_chat": {
          "properties": {
            "role": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "content": {
              "type": "object",
              "enabled": false
            },
            "sessionId": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "createTime": {
              "type": "date"
            },
            "deletedAt": {
              "type": "date"
            },
            "tags": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "tool_use_id": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "userId": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "message": {
              "properties": {
                "id": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "type": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "role": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "model": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "contentStr": {
                  "type": "text"
                },
                "contentBlocks": {
                  "type": "nested",
                  "enabled": false
                },
                "stopReason": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "stopSequence": {
                  "ignore_above": 1024,
                  "type": "keyword"
                },
                "usage": {
                  "properties": {
                    "input_tokens": {
                      "type": "long"
                    },
                    "output_tokens": {
                      "type": "long"
                    },
                    "credits": {
                      "type": "long"
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  },
  "_meta": {
    "ecs_version": "1.12.2"
  }
}
@@ -0,0 +1,7 @@
{
  "template": {},
  "version": 1,
  "_meta": {
    "description": "default settings for common Security Onion Assistant indices"
  }
}
@@ -0,0 +1,44 @@
{
  "template": {
    "mappings": {
      "properties": {
        "@timestamp": {
          "type": "date"
        },
        "so_kind": {
          "ignore_above": 1024,
          "type": "keyword"
        },
        "so_session": {
          "properties": {
            "title": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "sessionId": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "createTime": {
              "type": "date"
            },
            "deleteTime": {
              "type": "date"
            },
            "tags": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "userId": {
              "ignore_above": 1024,
              "type": "keyword"
            }
          }
        }
      }
    }
  },
  "_meta": {
    "ecs_version": "1.12.2"
  }
}
@@ -0,0 +1,7 @@
{
  "template": {},
  "version": 1,
  "_meta": {
    "description": "default settings for common Security Onion Assistant indices"
  }
}
@@ -21,7 +21,7 @@ while [[ "$COUNT" -le 240 ]]; do
        ELASTICSEARCH_CONNECTED="yes"
        echo "connected!"
        # Check cluster health once connected
        so-elasticsearch-query _cluster/health?wait_for_status=yellow > /dev/null 2>&1
        so-elasticsearch-query _cluster/health?wait_for_status=yellow\&timeout=120s > /dev/null 2>&1
        break
    else
        ((COUNT+=1))

195
salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot
Normal file
@@ -0,0 +1,195 @@
#!/bin/bash

. /usr/sbin/so-common

RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
BOLD='\033[1;37m'
NC='\033[0m'

log_title() {
    if [ "$1" == "LOG" ]; then
        echo -e "\n${BOLD}================ $2 ================${NC}\n"
    elif [ "$1" == "OK" ]; then
        echo -e "${GREEN} $2 ${NC}"
    elif [ "$1" == "WARN" ]; then
        echo -e "${YELLOW} $2 ${NC}"
    elif [ "$1" == "ERROR" ]; then
        echo -e "${RED} $2 ${NC}"
    fi
}
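
Each level maps to a color defined above; usage is simply:

    log_title "LOG"   "Elasticsearch Status"    # bold section banner
    log_title "OK"    "All health indicators are green"
    log_title "WARN"  "Approaching high watermark"
    log_title "ERROR" "Disk usage above flood stage"
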

health_report() {
    if ! health_report_output=$(so-elasticsearch-query _health_report?format=json --fail 2>/dev/null); then
        log_title "ERROR" "Failed to retrieve health report from Elasticsearch"
        return 1
    fi
    non_green_count=$(echo "$health_report_output" | jq '[.indicators | to_entries[] | select(.value.status != "green")] | length')

    if [ "$non_green_count" -gt 0 ]; then
        echo "$health_report_output" | jq -r '.indicators | to_entries[] | select(.value.status != "green") | .key' | while read -r indicator_name; do
            indicator=$(echo "$health_report_output" | jq -r ".indicators.\"$indicator_name\"")
            status=$(echo "$indicator" | jq -r '.status')
            symptom=$(echo "$indicator" | jq -r '.symptom // "No symptom available"')

            # Reformat indicator name
            display_name=$(echo "$indicator_name" | tr '_' ' ' | sed 's/\b\(.\)/\u\1/g')

            if [ "$status" = "yellow" ]; then
                log_title "WARN" "$display_name: $symptom"
            else
                log_title "ERROR" "$display_name: $symptom"
            fi

            # Diagnosis if available
            echo "$indicator" | jq -c '.diagnosis[]? // empty' | while read -r diagnosis; do
                cause=$(echo "$diagnosis" | jq -r '.cause // "Unknown"')
                action=$(echo "$diagnosis" | jq -r '.action // "No action specified"')

                echo -e "  ${BOLD}Cause:${NC} $cause\n"
                echo -e "  ${BOLD}Action:${NC} $action\n"

                # Check for affected indices
                affected_indices=$(echo "$diagnosis" | jq -r '.affected_resources.indices[]? // empty')
                if [ -n "$affected_indices" ]; then
                    echo -e "  ${BOLD}Affected indices:${NC}"
                    total_indices=$(echo "$affected_indices" | wc -l)
                    echo "$affected_indices" | head -10 | while read -r index; do
                        echo "    - $index"
                    done
                    if [ "$total_indices" -gt 10 ]; then
                        remaining=$((total_indices - 10))
                        echo "    ... and $remaining more indices (truncated for readability)"
                    fi
                fi
                echo
            done
        done
    else
        log_title "OK" "All health indicators are green"
    fi
}

elasticsearch_status() {
    log_title "LOG" "Elasticsearch Status"
    if so-elasticsearch-query / --fail --output /dev/null; then
        health_report
    else
        log_title "ERROR" "Elasticsearch API is not accessible"
        so-status
        log_title "ERROR" "Make sure Elasticsearch is running. Additionally, check for startup errors in /opt/so/log/elasticsearch/securityonion.log${NC}\n"

        exit 1
    fi

}

indices_by_age() {
    log_title "LOG" "Indices by Creation Date - Size > 1KB"
    log_title "WARN" "Since the high/flood watermark has been reached, consider updating ILM policies.\n"
    if ! indices_output=$(so-elasticsearch-query '_cat/indices?v&s=creation.date:asc&h=creation.date.string,index,status,health,docs.count,pri.store.size&bytes=b&format=json' --fail 2>/dev/null); then
        log_title "ERROR" "Failed to retrieve indices list from Elasticsearch"
        return 1
    fi

    # Filter for indices with size > 1KB (1024 bytes) and format output
    echo -e "${BOLD}Creation Date       Name                                                                             Size${NC}"
    echo -e "${BOLD}--------------------------------------------------------------------------------------------------------------${NC}"

    # Create list of indices excluding .internal, so-detection*, so-case*
    echo "$indices_output" | jq -r '.[] | select((."pri.store.size" | tonumber) > 1024) | select(.index | (startswith(".internal") or startswith("so-detection") or startswith("so-case")) | not ) | "\(."creation.date.string") | \(.index) | \(."pri.store.size")"' | while IFS='|' read -r creation_date index_name size_bytes; do
        # Convert bytes to GB / MB
        if [ "$size_bytes" -gt 1073741824 ]; then
            size_human=$(echo "scale=2; $size_bytes / 1073741824" | bc)GB
        else
            size_human=$(echo "scale=2; $size_bytes / 1048576" | bc)MB
        fi

        creation_date=$(date -d "$creation_date" '+%Y-%m-%dT%H:%MZ' )

        # Format output with spacing
        printf "%-19s %-76s %10s\n" "$creation_date" "$index_name" "$size_human"
    done
}

watermark_settings() {
|
||||
watermark_path=".defaults.cluster.routing.allocation.disk.watermark"
|
||||
if ! watermark_output=$(so-elasticsearch-query _cluster/settings?include_defaults=true\&filter_path=*.cluster.routing.allocation.disk.* --fail 2>/dev/null); then
|
||||
log_title "ERROR" "Failed to retrieve watermark settings from Elasticsearch"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! disk_allocation_output=$(so-elasticsearch-query _cat/nodes?v\&h=name,ip,disk.used_percent,disk.avail,disk.total,node.role\&format=json --fail 2>/dev/null); then
|
||||
log_title "ERROR" "Failed to retrieve disk allocation data from Elasticsearch"
|
||||
return 1
|
||||
fi
|
||||
|
||||
flood=$(echo $watermark_output | jq -r "$watermark_path.flood_stage" )
|
||||
high=$(echo $watermark_output | jq -r "$watermark_path.high" )
|
||||
low=$(echo $watermark_output | jq -r "$watermark_path.low" )
|
||||
|
||||
# Strip percentage signs for comparison
|
||||
flood_num=${flood%\%}
|
||||
high_num=${high%\%}
|
||||
low_num=${low%\%}
|
||||
|
||||
# Check each nodes disk usage
|
||||
log_title "LOG" "Disk Usage Check"
|
||||
echo -e "${BOLD}LOW:${GREEN}$low${NC}${BOLD} HIGH:${YELLOW}${high}${NC}${BOLD} FLOOD:${RED}${flood}${NC}\n"
|
||||
|
||||
# Only show data nodes (d=data, h=hot, w=warm, c=cold, f=frozen, s=content)
|
||||
echo "$disk_allocation_output" | jq -r '.[] | select(.["node.role"] | test("[dhwcfs]")) | "\(.name)|\(.["disk.used_percent"])"' | while IFS='|' read -r node_name disk_used; do
|
||||
disk_used_num=$(echo $disk_used | bc)
|
||||
|
||||
if (( $(echo "$disk_used_num >= $flood_num" | bc -l) )); then
|
||||
log_title "ERROR" "$node_name is at or above the flood watermark ($flood)! Disk usage: ${disk_used}%"
|
||||
touch /tmp/watermark_reached
|
||||
elif (( $(echo "$disk_used_num >= $high_num" | bc -l) )); then
|
||||
log_title "ERROR" "$node_name is at or above the high watermark ($high)! Disk usage: ${disk_used}%"
|
||||
touch /tmp/watermark_reached
|
||||
else
|
||||
log_title "OK" "$node_name disk usage: ${disk_used}%"
|
||||
fi
|
||||
done
|
||||
|
||||
# Check if we need to show indices by age
|
||||
if [ -f /tmp/watermark_reached ]; then
|
||||
indices_by_age
|
||||
rm -f /tmp/watermark_reached
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
unassigned_shards() {
|
||||
|
||||
if ! unassigned_shards_output=$(so-elasticsearch-query _cat/shards?v\&h=index,shard,prirep,state,unassigned.reason,unassigned.details\&s=state\&format=json --fail 2>/dev/null); then
|
||||
log_title "ERROR" "Failed to retrieve shard data from Elasticsearch"
|
||||
return 1
|
||||
fi
|
||||
|
||||
log_title "LOG" "Unassigned Shards Check"
|
||||
# Check if there are any UNASSIGNED shards
|
||||
unassigned_count=$(echo "$unassigned_shards_output" | jq '[.[] | select(.state == "UNASSIGNED")] | length')
|
||||
|
||||
if [ "$unassigned_count" -gt 0 ]; then
|
||||
echo "$unassigned_shards_output" | jq -r '.[] | select(.state == "UNASSIGNED") | "\(.index)|\(.shard)|\(.prirep)|\(."unassigned.reason")"' | while IFS='|' read -r index shard prirep reason; do
|
||||
if [ "$prirep" = "r" ]; then
|
||||
log_title "WARN" "Replica shard for index $index is unassigned. Reason: $reason"
|
||||
elif [ "$prirep" = "p" ]; then
|
||||
log_title "ERROR" "Primary shard for index $index is unassigned. Reason: $reason"
|
||||
fi
|
||||
done
|
||||
else
|
||||
log_title "OK" "All shards are assigned"
|
||||
fi
|
||||
}
|
||||
|
||||
main() {
|
||||
elasticsearch_status
|
||||
watermark_settings
|
||||
unassigned_shards
|
||||
}
|
||||
|
||||
main
|
||||
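The non-green indicator filter in health_report() can be exercised without a live cluster. A minimal sketch, assuming a hand-built _health_report payload (the sample values below are illustrative, not real cluster output):

# Count non-green indicators in a canned payload.
health_report_output='{"indicators":{"disk":{"status":"yellow","symptom":"High watermark exceeded on 1 node"},"shards_availability":{"status":"green"}}}'
echo "$health_report_output" | jq '[.indicators | to_entries[] | select(.value.status != "green")] | length'
# prints: 1

# List the names of the non-green indicators, as the script does before drilling in.
echo "$health_report_output" | jq -r '.indicators | to_entries[] | select(.value.status != "green") | .key'
# prints: disk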
@@ -136,7 +136,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
     TEMPLATE=${i::-14}
     COMPONENT_PATTERN=${TEMPLATE:3}
     MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
-    if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ logs-http_endpoint\.generic|logs-winlog\.winlog ]]; then
+    if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then
       load_failures=$((load_failures+1))
       echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
     else
@@ -21,7 +21,7 @@
   'so-strelka-filestream'
 ] %}

-{% elif GLOBALS.role == 'so-manager' or GLOBALS.role == 'so-standalone' or GLOBALS.role == 'so-managersearch' %}
+{% elif GLOBALS.role in ['so-manager', 'so-standalone','so-managersearch', 'so-managerhype'] %}
 {% set NODE_CONTAINERS = [
   'so-dockerregistry',
   'so-elasticsearch',
@@ -14,11 +14,13 @@ firewall:
    external_kafka: []
    fleet: []
    heavynode: []
    hypervisor: []
    idh: []
    import: []
    localhost:
      - 127.0.0.1
    manager: []
    managerhype: []
    managersearch: []
    receiver: []
    searchnode: []
@@ -489,6 +491,15 @@ firewall:
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            hypervisor:
              portgroups:
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            customhostgroup0:
              portgroups: []
            customhostgroup1:
@@ -541,6 +552,218 @@ firewall:
            desktop:
              portgroups:
                - salt_manager
            hypervisor:
              portgroups:
                - salt_manager
            self:
              portgroups:
                - syslog
            syslog:
              portgroups:
                - syslog
            customhostgroup0:
              portgroups: []
            customhostgroup1:
              portgroups: []
            customhostgroup2:
              portgroups: []
            customhostgroup3:
              portgroups: []
            customhostgroup4:
              portgroups: []
            customhostgroup5:
              portgroups: []
            customhostgroup6:
              portgroups: []
            customhostgroup7:
              portgroups: []
            customhostgroup8:
              portgroups: []
            customhostgroup9:
              portgroups: []
    managerhype:
      chain:
        DOCKER-USER:
          hostgroups:
            managerhype:
              portgroups:
                - kibana
                - redis
                - influxdb
                - elasticsearch_rest
                - elasticsearch_node
                - docker_registry
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - localrules
                - sensoroni
            fleet:
              portgroups:
                - elasticsearch_rest
                - docker_registry
                - influxdb
                - sensoroni
                - yum
                - beats_5044
                - beats_5644
                - beats_5056
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            idh:
              portgroups:
                - docker_registry
                - influxdb
                - sensoroni
                - yum
                - beats_5044
                - beats_5644
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            sensor:
              portgroups:
                - beats_5044
                - beats_5644
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - yum
                - docker_registry
                - influxdb
                - sensoroni
            searchnode:
              portgroups:
                - redis
                - elasticsearch_rest
                - elasticsearch_node
                - beats_5644
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            heavynode:
              portgroups:
                - redis
                - elasticsearch_rest
                - elasticsearch_node
                - beats_5644
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            receiver:
              portgroups:
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            analyst:
              portgroups:
                - nginx
            beats_endpoint:
              portgroups:
                - beats_5044
            beats_endpoint_ssl:
              portgroups:
                - beats_5644
            elasticsearch_rest:
              portgroups:
                - elasticsearch_rest
            elastic_agent_endpoint:
              portgroups:
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            endgame:
              portgroups:
                - endgame
            external_suricata:
              portgroups:
                - external_suricata
            desktop:
              portgroups:
                - docker_registry
                - influxdb
                - sensoroni
                - yum
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            hypervisor:
              portgroups:
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            customhostgroup0:
              portgroups: []
            customhostgroup1:
              portgroups: []
            customhostgroup2:
              portgroups: []
            customhostgroup3:
              portgroups: []
            customhostgroup4:
              portgroups: []
            customhostgroup5:
              portgroups: []
            customhostgroup6:
              portgroups: []
            customhostgroup7:
              portgroups: []
            customhostgroup8:
              portgroups: []
            customhostgroup9:
              portgroups: []
        INPUT:
          hostgroups:
            anywhere:
              portgroups:
                - ssh
            dockernet:
              portgroups:
                - all
            fleet:
              portgroups:
                - salt_manager
            idh:
              portgroups:
                - salt_manager
            localhost:
              portgroups:
                - all
            sensor:
              portgroups:
                - salt_manager
            searchnode:
              portgroups:
                - salt_manager
            heavynode:
              portgroups:
                - salt_manager
            receiver:
              portgroups:
                - salt_manager
            desktop:
              portgroups:
                - salt_manager
            hypervisor:
              portgroups:
                - salt_manager
            self:
              portgroups:
                - syslog
@@ -686,6 +909,15 @@ firewall:
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            hypervisor:
              portgroups:
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            customhostgroup0:
              portgroups: []
            customhostgroup1:
@@ -738,6 +970,9 @@ firewall:
            desktop:
              portgroups:
                - salt_manager
            hypervisor:
              portgroups:
                - salt_manager
            self:
              portgroups:
                - syslog
@@ -890,6 +1125,15 @@ firewall:
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
            hypervisor:
              portgroups:
                - yum
                - docker_registry
                - influxdb
                - elastic_agent_control
                - elastic_agent_data
                - elastic_agent_update
                - sensoroni
            customhostgroup0:
              portgroups: []
            customhostgroup1:
@@ -945,6 +1189,9 @@ firewall:
            desktop:
              portgroups:
                - salt_manager
            hypervisor:
              portgroups:
                - salt_manager
            self:
              portgroups:
                - syslog
@@ -983,6 +1230,10 @@ firewall:
              portgroups:
                - elasticsearch_node
                - elasticsearch_rest
            managerhype:
              portgroups:
                - elasticsearch_node
                - elasticsearch_rest
            standalone:
              portgroups:
                - elasticsearch_node
@@ -1130,6 +1381,10 @@ firewall:
              portgroups:
                - elasticsearch_node
                - elasticsearch_rest
            managerhype:
              portgroups:
                - elasticsearch_node
                - elasticsearch_rest
            standalone:
              portgroups:
                - elasticsearch_node
@@ -1332,6 +1587,9 @@ firewall:
              portgroups:
                - redis
                - elastic_agent_data
            managerhype:
              portgroups:
                - elastic_agent_data
            self:
              portgroups:
                - redis
@@ -1449,6 +1707,9 @@ firewall:
            managersearch:
              portgroups:
                - openssh
            managerhype:
              portgroups:
                - openssh
            standalone:
              portgroups:
                - openssh
@@ -1472,3 +1733,66 @@ firewall:
              portgroups: []
            customhostgroup9:
              portgroups: []
    hypervisor:
      chain:
        DOCKER-USER:
          hostgroups:
            customhostgroup0:
              portgroups: []
            customhostgroup1:
              portgroups: []
            customhostgroup2:
              portgroups: []
            customhostgroup3:
              portgroups: []
            customhostgroup4:
              portgroups: []
            customhostgroup5:
              portgroups: []
            customhostgroup6:
              portgroups: []
            customhostgroup7:
              portgroups: []
            customhostgroup8:
              portgroups: []
            customhostgroup9:
              portgroups: []
        INPUT:
          hostgroups:
            anywhere:
              portgroups:
                - ssh
            dockernet:
              portgroups:
                - all
            localhost:
              portgroups:
                - all
            manager:
              portgroups: []
            managersearch:
              portgroups: []
            managerhype:
              portgroups: []
            standalone:
              portgroups: []
            customhostgroup0:
              portgroups: []
            customhostgroup1:
              portgroups: []
            customhostgroup2:
              portgroups: []
            customhostgroup3:
              portgroups: []
            customhostgroup4:
              portgroups: []
            customhostgroup5:
              portgroups: []
            customhostgroup6:
              portgroups: []
            customhostgroup7:
              portgroups: []
            customhostgroup8:
              portgroups: []
            customhostgroup9:
              portgroups: []
@@ -91,6 +91,10 @@ COMMIT
 -A INPUT -m conntrack --ctstate INVALID -j DROP
 -A INPUT -p icmp -j ACCEPT
 -A INPUT -j LOGGING
+{% if GLOBALS.role in ['so-hypervisor', 'so-managerhype'] -%}
+-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
+-A FORWARD -i br0 -o br0 -j ACCEPT
+{%- endif %}
 -A FORWARD -j DOCKER-USER
 -A FORWARD -j DOCKER-ISOLATION-STAGE-1
 -A FORWARD -o sobridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
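The FORWARD rules gated on the so-hypervisor and so-managerhype roles above can be spot-checked on a node once the template has rendered and applied. A minimal sketch using read-only iptables queries (standard iptables flags, nothing Security Onion specific):

# Show FORWARD rules in order; the br0 intra-bridge ACCEPT and the
# RELATED,ESTABLISHED ACCEPT should precede the DOCKER-USER jump.
sudo iptables -L FORWARD -v -n --line-numbers

# Narrow the listing to the bridge-related rules.
sudo iptables -S FORWARD | grep -E 'br0|RELATED,ESTABLISHED'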
@@ -25,7 +25,7 @@
 {% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
 {% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}

-{% if role in ['manager', 'managersearch', 'standalone'] %}
+{% if role.startswith('manager') or role == 'standalone' %}
 {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
 {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
 {% endif %}
@@ -38,8 +38,8 @@
 {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
 {% endif %}

-{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
-{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
+{% if role.startswith('manager') or role in ['standalone', 'receiver'] %}
+{% for r in ['manager', 'managersearch', 'managerhype', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
 {% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
 {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
 {% endif %}
@@ -48,11 +48,11 @@

 {% if KAFKA_EXTERNAL_ACCESS %}
 {# Kafka external access only applies for Kafka nodes with the broker role. #}
-{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] and 'broker' in kafka_node_type %}
+{% if role.startswith('manager') or role in ['standalone', 'receiver'] and 'broker' in kafka_node_type %}
 {% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
 {% endif %}
 {% endif %}

 {% endif %}

 {% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}

@@ -36,6 +36,7 @@ firewall:
   external_kafka: *hostgroupsettings
   fleet: *hostgroupsettings
   heavynode: *hostgroupsettings
+  hypervisor: *hostgroupsettings
   idh: *hostgroupsettings
   import: *hostgroupsettings
   localhost: *ROhostgroupsettingsadv
125  salt/hypervisor/defaults.yaml  Normal file
@@ -0,0 +1,125 @@
hypervisor:
  model:
    testModel:
      hardware:
        cpu: 128
        memory: 128
        disk:
          1: pci_0000_c7_00_0
          2: pci_0000_c8_00_0
        copper:
          1: pci_0000_c4_00_0
          2: pci_0000_c4_00_1
          3: pci_0000_c4_00_2
          4: pci_0000_c4_00_3
        sfp:
          5: pci_0000_02_00_0
          6: pci_0000_02_00_1
          7: pci_0000_41_00_0
          8: pci_0000_41_00_1
    SOSSNNV:
      hardware:
        cpu: 128
        memory: 256
        disk:
          1: pci_0000_42_00_0
          2: pci_0000_43_00_0
          3: pci_0000_44_00_0
          4: pci_0000_45_00_0
        copper:
        sfp:
          1: pci_0000_02_00_0
          2: pci_0000_02_00_1
          3: pci_0000_41_00_0
          4: pci_0000_41_00_1
    SOSSNNV-DE02:
      hardware:
        cpu: 128
        memory: 384
        disk:
          1: pci_0000_41_00_0
          2: pci_0000_42_00_0
          3: pci_0000_81_00_0
          4: pci_0000_82_00_0
          5: pci_0000_83_00_0
          6: pci_0000_84_00_0
        copper:
          1: pci_0000_85_00_0
          2: pci_0000_85_00_1
          3: pci_0000_85_00_2
          4: pci_0000_85_00_3
        sfp:
          5: pci_0000_c4_00_0
          6: pci_0000_c4_00_1
          7: pci_0000_c5_00_0
          8: pci_0000_c5_00_1
          9: pci_0000_c5_00_2
          10: pci_0000_c5_00_3
    SOSSN7200:
      hardware:
        cpu: 128
        memory: 256
        copper:
          1: pci_0000_03_00_0
          2: pci_0000_03_00_1
          3: pci_0000_03_00_2
          4: pci_0000_03_00_3
        sfp:
          5: pci_0000_02_00_0
          6: pci_0000_02_00_1
          7: pci_0000_81_00_0
          8: pci_0000_81_00_1
          9: pci_0000_81_00_2
          10: pci_0000_81_00_3
    SOSSN7200-DE02:
      hardware:
        cpu: 128
        memory: 384
        copper:
          1: pci_0000_82_00_0
          2: pci_0000_82_00_1
          3: pci_0000_82_00_2
          4: pci_0000_82_00_3
        sfp:
          5: pci_0000_c4_00_0
          6: pci_0000_c4_00_1
          7: pci_0000_c5_00_0
          8: pci_0000_c5_00_1
          9: pci_0000_c6_00_0
          10: pci_0000_c6_00_1
          11: pci_0000_c6_00_2
          12: pci_0000_c6_00_3
    SOS4000:
      hardware:
        cpu: 128
        memory: 256
        copper:
          1: pci_0000_03_00_0
          2: pci_0000_03_00_1
          3: pci_0000_03_00_2
          4: pci_0000_03_00_3
        sfp:
          5: pci_0000_02_00_0
          6: pci_0000_02_00_1
          7: pci_0000_81_00_0
          8: pci_0000_81_00_1
          9: pci_0000_81_00_2
          10: pci_0000_81_00_3
    SOS5000-DE02:
      hardware:
        cpu: 128
        memory: 384
        copper:
          1: pci_0000_82_00_0
          2: pci_0000_82_00_1
          3: pci_0000_82_00_2
          4: pci_0000_82_00_3
        sfp:
          5: pci_0000_c4_00_0
          6: pci_0000_c4_00_1
          7: pci_0000_c5_00_0
          8: pci_0000_c5_00_1
          9: pci_0000_c6_00_0
          10: pci_0000_c6_00_1
          11: pci_0000_c6_00_2
          12: pci_0000_c6_00_3
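A quick way to confirm how one of the model maps above parses is to load the file with a YAML parser. A minimal sketch, assuming the file lands under the default saltstack tree and that PyYAML is available (the path and the SOS4000 key are illustrative):

# Print the hardware map for one model from the hypervisor defaults.
python3 -c '
import json, yaml
d = yaml.safe_load(open("/opt/so/saltstack/default/salt/hypervisor/defaults.yaml"))
print(json.dumps(d["hypervisor"]["model"]["SOS4000"], indent=2))
'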
1  salt/hypervisor/hosts/README  Normal file
@@ -0,0 +1 @@
This directory will contain hypervisor hosts. We need this README in place to ensure the /opt/so/saltstack/local/salt/hypervisor/hosts directory gets created during setup.
49  salt/hypervisor/init.sls  Normal file
@@ -0,0 +1,49 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
#   "You may not move, change, disable, or circumvent the license key functionality
#   in the software, and you may not remove or obscure any functionality in the
#   software that is protected by the license key."

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% if 'vrt' in salt['pillar.get']('features', []) %}

hypervisor_log_dir:
  file.directory:
    - name: /opt/so/log/hypervisor

hypervisor_sbin:
  file.recurse:
    - name: /usr/sbin
    - source: salt://hypervisor/tools/sbin
    - file_mode: 744

hypervisor_sbin_jinja:
  file.recurse:
    - name: /usr/sbin
    - source: salt://hypervisor/tools/sbin_jinja
    - template: jinja
    - file_mode: 744

{% else %}

{{sls}}_no_license_detected:
  test.fail_without_changes:
    - name: {{sls}}_no_license_detected
    - comment:
      - "Hypervisor nodes are a feature supported only for customers with a valid license.
        Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
        for more information about purchasing a license to enable this feature."

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
182  salt/hypervisor/map.jinja  Normal file
@@ -0,0 +1,182 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0.

   Note: Per the Elastic License 2.0, the second limitation states:

   "You may not move, change, disable, or circumvent the license key functionality
   in the software, and you may not remove or obscure any functionality in the
   software that is protected by the license key." #}

{% if 'vrt' in salt['pillar.get']('features', []) %}

{# Import defaults.yaml for model hardware capabilities #}
{% import_yaml 'hypervisor/defaults.yaml' as DEFAULTS %}
{% set HYPERVISORMERGED = salt['pillar.get']('hypervisor', default=DEFAULTS.hypervisor, merge=True) %}

{# Get hypervisor nodes from pillar #}
{% set NODES = salt['pillar.get']('hypervisor:nodes', {}) %}

{# Build enhanced HYPERVISORS structure #}
{% set HYPERVISORS = {} %}
{% do salt.log.debug('salt/hypervisor/map.jinja: NODES content: ' ~ NODES | tojson) %}
{% for role, hypervisors in NODES.items() %}
  {% do salt.log.debug('salt/hypervisor/map.jinja: Processing role: ' ~ role) %}
  {% do HYPERVISORS.update({role: {}}) %}
  {% for hypervisor, config in hypervisors.items() %}
    {% do salt.log.debug('salt/hypervisor/map.jinja: Processing hypervisor: ' ~ hypervisor ~ ' with config: ' ~ config | tojson) %}
    {# Get model from cached grains using Salt runner #}
    {% set grains = salt.saltutil.runner('cache.grains', tgt=hypervisor ~ '_*', tgt_type='glob') %}
    {% set model = '' %}
    {% if grains %}
      {% set minion_id = grains.keys() | first %}
      {% set model = grains[minion_id].get('sosmodel', grains[minion_id].get('byodmodel', '')) %}
    {% endif %}

    {% set model_config = HYPERVISORMERGED.model.get(model, {}) %}

    {# Get VM list from VMs file #}
    {% set vms = {} %}
    {% set vm_list = [] %}
    {% set vm_list_file = 'hypervisor/hosts/' ~ hypervisor ~ 'VMs' %}
    {% do salt.log.debug('salt/hypervisor/map.jinja: VM list file: ' ~ vm_list_file) %}
    {% if salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_list_file) %}
      {% import_json vm_list_file as vm_list %}
    {% endif %}
    {% if vm_list %}
      {% do salt.log.debug('salt/hypervisor/map.jinja: VM list content: ' ~ vm_list | tojson) %}
    {% else %}
      {# we won't get here if the vm_list_file doesn't exist because we will get TemplateNotFound on the import_json #}
      {% do salt.log.debug('salt/hypervisor/map.jinja: VM list empty: ' ~ vm_list_file) %}
    {% endif %}

    {# Load status and configuration for each VM #}
    {% for vm in vm_list %}
      {# Get VM details from list entry #}
      {% set hostname = vm.get('hostname', '') %}
      {% set role = vm.get('role', '') %}
      {% do salt.log.debug('salt/hypervisor/map.jinja: Processing VM - hostname: ' ~ hostname ~ ', role: ' ~ role) %}

      {# Try to load VM configuration from config file first, then .error file if config doesn't exist #}
      {% set vm_file = 'hypervisor/hosts/' ~ hypervisor ~ '/' ~ hostname ~ '_' ~ role %}
      {% set vm_error_file = vm_file ~ '.error' %}
      {% do salt.log.debug('salt/hypervisor/map.jinja: VM config file: ' ~ vm_file) %}

      {# Check if base config file exists #}
      {% set config_exists = salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_file) %}
      {% set error_exists = salt['file.file_exists']('/opt/so/saltstack/local/salt/' ~ vm_error_file) %}

      {% set vm_state = none %}
      {% if config_exists %}
        {% import_json vm_file as vm_state %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: Loaded VM config from base file') %}
      {% elif error_exists %}
        {% import_json vm_error_file as vm_state %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: Loaded VM config from .error file') %}
      {% else %}
        {% do salt.log.warning('salt/hypervisor/map.jinja: No config or error file found for VM ' ~ hostname ~ '_' ~ role) %}
      {% endif %}

      {% if vm_state %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: VM config content: ' ~ vm_state | tojson) %}
        {% set vm_data = {'config': vm_state.config} %}

        {# Load VM status from status file #}
        {% set status_file = vm_file ~ '.status' %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: VM status file: ' ~ status_file) %}
        {% import_json status_file as status_data %}
        {% if status_data %}
          {% do salt.log.debug('salt/hypervisor/map.jinja: VM status content: ' ~ status_data | tojson) %}
          {% do vm_data.update({'status': status_data}) %}
        {% else %}
          {% do salt.log.debug('salt/hypervisor/map.jinja: Status file empty: ' ~ status_file) %}
          {% do vm_data.update({
            'status': {
              'status': '',
              'details': none,
              'timestamp': ''
            }
          }) %}
        {% endif %}
        {% do vms.update({hostname ~ '_' ~ role: vm_data}) %}
      {% else %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: Skipping VM ' ~ hostname ~ '_' ~ role ~ ' - no config available') %}
      {% endif %}
    {% endfor %}

    {# Find and add destroyed VMs from status files #}
    {% set processed_vms = [] %}
    {% for vm_full_name, vm_data in vms.items() %}
      {% do processed_vms.append(vm_full_name) %}
    {% endfor %}

    {# Find all status files for this hypervisor #}
    {% set relative_path = 'hypervisor/hosts/' ~ hypervisor %}
    {% set absolute_path = '/opt/so/saltstack/local/salt/' ~ relative_path %}
    {% do salt.log.debug('salt/hypervisor/map.jinja: Scanning for status files in: ' ~ absolute_path) %}

    {# Try to find status files using file.find with absolute path #}
    {% set status_files = salt['file.find'](absolute_path, name='*_*.status', type='f') %}
    {% do salt.log.debug('salt/hypervisor/map.jinja: Found status files: ' ~ status_files | tojson) %}

    {# Convert absolute paths back to relative paths for processing #}
    {% set relative_status_files = [] %}
    {% for status_file in status_files %}
      {% set relative_file = status_file | replace('/opt/so/saltstack/local/salt/', '') %}
      {% do relative_status_files.append(relative_file) %}
    {% endfor %}
    {% set status_files = relative_status_files %}

    {% do salt.log.debug('salt/hypervisor/map.jinja: Converted to relative paths: ' ~ status_files | tojson) %}

    {% for status_file in status_files %}
      {# Extract the VM name from the filename #}
      {% set basename = status_file.split('/')[-1] %}
      {% set vm_name = basename.replace('.status', '') %}
      {% set hostname = vm_name.split('_')[0] %}

      {# Skip already processed VMs #}
      {% if vm_name in processed_vms %}
        {% continue %}
      {% endif %}

      {# Read the status file #}
      {% do salt.log.debug('salt/hypervisor/map.jinja: Processing potential destroyed VM status file: ' ~ status_file) %}
      {% import_json status_file as status_data %}

      {# Only process files with "Destroyed Instance" status #}
      {% if status_data and status_data.status == 'Destroyed Instance' %}
        {% do salt.log.debug('salt/hypervisor/map.jinja: Found VM with Destroyed Instance status: ' ~ vm_name) %}

        {# Add to vms with minimal config #}
        {% do vms.update({
          vm_name: {
            'status': status_data,
            'config': {}
          }
        }) %}
      {% endif %}
    {% endfor %}

    {# Merge node config with model capabilities and VM states #}
    {% do HYPERVISORS[role].update({
      hypervisor: {
        'config': config,
        'model': model,
        'hardware': model_config.get('hardware', {}),
        'vms': vms
      }
    }) %}
  {% endfor %}
{% endfor %}

{% else %}

{% do salt.log.error(
  'Hypervisor nodes are a feature supported only for customers with a valid license. '
  'Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com '
  'for more information about purchasing a license to enable this feature.'
) %}

{% endif %}
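map.jinja keys each hypervisor off cached grains (sosmodel, falling back to byodmodel). The same lookup it performs through salt.saltutil.runner can be reproduced from a manager shell when troubleshooting an empty HYPERVISORS map. A minimal sketch (the hv01 minion glob is a placeholder):

# Reproduce the cache.grains lookup the map performs for one hypervisor.
sudo salt-run cache.grains tgt='hv01_*' tgt_type='glob' | grep -E 'sosmodel|byodmodel'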
445  salt/hypervisor/tools/sbin/so-nvme-raid1.sh  Normal file
@@ -0,0 +1,445 @@
#!/bin/bash

#################################################################
# RAID-1 Setup Script for NVMe Drives
#################################################################
#
# DESCRIPTION:
#   This script automatically sets up a RAID-1 (mirrored) array using two NVMe drives
#   (/dev/nvme0n1 and /dev/nvme1n1) and mounts it at /nsm with XFS filesystem.
#
# FUNCTIONALITY:
#   - Detects and reports existing RAID configurations
#   - Thoroughly cleans target drives of any existing data/configurations
#   - Creates GPT partition tables with RAID-type partitions
#   - Establishes RAID-1 array (${RAID_DEVICE}) for data redundancy
#   - Formats the array with XFS filesystem for performance
#   - Automatically mounts at /nsm and configures for boot persistence
#   - Provides monitoring information for resync operations
#
# SAFETY FEATURES:
#   - Requires root privileges
#   - Exits gracefully if RAID already exists and is mounted
#   - Performs comprehensive cleanup to avoid conflicts
#   - Forces partition table updates and waits for system recognition
#
# PREREQUISITES:
#   - Two NVMe drives: /dev/nvme0n1 and /dev/nvme1n1
#   - Root access
#   - mdadm, sgdisk, and standard Linux utilities
#
# WARNING: This script will DESTROY all data on the target drives!
#
# USAGE:
#   sudo ./so-nvme-raid1.sh                  # Normal operation
#   sudo ./so-nvme-raid1.sh --force-cleanup  # Force cleanup of existing RAID
#
#################################################################

# Exit on any error
set -e

# Configuration variables
RAID_ARRAY_NAME="md0"
RAID_DEVICE="/dev/${RAID_ARRAY_NAME}"
MOUNT_POINT="/nsm"
FORCE_CLEANUP=false

# Parse command line arguments
for arg in "$@"; do
    case $arg in
        --force-cleanup)
            FORCE_CLEANUP=true
            shift
            ;;
        *)
            ;;
    esac
done

# Function to log messages
log() {
    echo "[$(date '+%Y-%m-%d %H:%M:%S')] $1"
}

# Function to check if running as root
check_root() {
    if [ "$EUID" -ne 0 ]; then
        log "Error: Please run as root"
        exit 1
    fi
}

# Function to force cleanup all RAID components
force_cleanup_raid() {
    log "=== FORCE CLEANUP MODE ==="
    log "This will destroy all RAID configurations and data on target drives!"

    # Stop all MD arrays
    log "Stopping all MD arrays"
    mdadm --stop --scan 2>/dev/null || true

    # Wait for arrays to stop
    sleep 2

    # Remove any running md devices
    for md in /dev/md*; do
        if [ -b "$md" ]; then
            log "Stopping $md"
            mdadm --stop "$md" 2>/dev/null || true
        fi
    done

    # Force cleanup both NVMe drives
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        log "Force cleaning $device"

        # Kill any processes using the device
        fuser -k "${device}"* 2>/dev/null || true

        # Unmount any mounted partitions
        for part in "${device}"*; do
            if [ -b "$part" ]; then
                umount -f "$part" 2>/dev/null || true
            fi
        done

        # Force zero RAID superblocks on partitions
        for part in "${device}"p*; do
            if [ -b "$part" ]; then
                log "Zeroing RAID superblock on $part"
                mdadm --zero-superblock --force "$part" 2>/dev/null || true
            fi
        done

        # Zero superblock on the device itself
        log "Zeroing RAID superblock on $device"
        mdadm --zero-superblock --force "$device" 2>/dev/null || true

        # Remove LVM physical volumes
        pvremove -ff -y "$device" 2>/dev/null || true

        # Wipe all filesystem and partition signatures
        log "Wiping all signatures from $device"
        wipefs -af "$device" 2>/dev/null || true

        # Overwrite the beginning of the drive (partition table area)
        log "Clearing partition table on $device"
        dd if=/dev/zero of="$device" bs=1M count=10 2>/dev/null || true

        # Clear the end of the drive (backup partition table area)
        local device_size=$(blockdev --getsz "$device" 2>/dev/null || echo "0")
        if [ "$device_size" -gt 0 ]; then
            dd if=/dev/zero of="$device" bs=512 seek=$(( device_size - 2048 )) count=2048 2>/dev/null || true
        fi

        # Force kernel to re-read partition table
        blockdev --rereadpt "$device" 2>/dev/null || true
        partprobe -s "$device" 2>/dev/null || true
    done

    # Clear mdadm configuration
    log "Clearing mdadm configuration"
    echo "DEVICE partitions" > /etc/mdadm.conf

    # Remove any fstab entries for the RAID device or mount point
    log "Cleaning fstab entries"
    sed -i "\|${RAID_DEVICE}|d" /etc/fstab
    sed -i "\|${MOUNT_POINT}|d" /etc/fstab

    # Wait for system to settle
    udevadm settle
    sleep 5

    log "Force cleanup complete!"
    log "Proceeding with RAID setup..."
}

# Function to find MD arrays using specific devices
find_md_arrays_using_devices() {
    local target_devices=("$@")
    local found_arrays=()

    # Parse /proc/mdstat to find arrays using our target devices
    if [ -f "/proc/mdstat" ]; then
        while IFS= read -r line; do
            if [[ $line =~ ^(md[0-9]+) ]]; then
                local array_name="${BASH_REMATCH[1]}"
                local array_path="/dev/$array_name"

                # Check if this array uses any of our target devices
                for device in "${target_devices[@]}"; do
                    if echo "$line" | grep -q "${device##*/}"; then
                        found_arrays+=("$array_path")
                        break
                    fi
                done
            fi
        done < /proc/mdstat
    fi

    printf '%s\n' "${found_arrays[@]}"
}

# Function to check if RAID is already set up
check_existing_raid() {
    local target_devices=("/dev/nvme0n1p1" "/dev/nvme1n1p1")
    local found_arrays=($(find_md_arrays_using_devices "${target_devices[@]}"))

    # Check if we found any arrays using our target devices
    if [ ${#found_arrays[@]} -gt 0 ]; then
        for array_path in "${found_arrays[@]}"; do
            if mdadm --detail "$array_path" &>/dev/null; then
                local raid_state=$(mdadm --detail "$array_path" | grep "State" | awk '{print $3}')
                local mount_point="/nsm"

                log "Found existing RAID array $array_path (State: $raid_state)"

                # Check what's currently mounted at /nsm
                local current_mount=$(findmnt -n -o SOURCE "$mount_point" 2>/dev/null || echo "")

                if [ -n "$current_mount" ]; then
                    if [ "$current_mount" = "$array_path" ]; then
                        log "RAID array $array_path is already correctly mounted at $mount_point"
                        log "Current RAID details:"
                        mdadm --detail "$array_path"

                        # Check if resyncing
                        if grep -q "resync" /proc/mdstat; then
                            log "RAID is currently resyncing:"
                            grep resync /proc/mdstat
                            log "You can monitor progress with: watch -n 60 cat /proc/mdstat"
                        else
                            log "RAID is fully synced and operational"
                        fi

                        # Show disk usage
                        log "Current disk usage:"
                        df -h "$mount_point"

                        exit 0
                    else
                        log "Found $mount_point mounted on $current_mount, but RAID array $array_path exists"
                        log "Will unmount current filesystem and remount on RAID array"

                        # Unmount current filesystem
                        log "Unmounting $mount_point"
                        umount "$mount_point"

                        # Remove old fstab entry
                        log "Removing old fstab entry for $current_mount"
                        sed -i "\|$current_mount|d" /etc/fstab

                        # Mount the RAID array
                        log "Mounting RAID array $array_path at $mount_point"
                        mount "$array_path" "$mount_point"

                        # Update fstab
                        log "Updating fstab for RAID array"
                        sed -i "\|${array_path}|d" /etc/fstab
                        echo "${array_path} ${mount_point} xfs defaults,nofail 0 0" >> /etc/fstab

                        log "RAID array is now mounted at $mount_point"
                        log "Current RAID details:"
                        mdadm --detail "$array_path"

                        # Check if resyncing
                        if grep -q "resync" /proc/mdstat; then
                            log "RAID is currently resyncing:"
                            grep resync /proc/mdstat
                            log "You can monitor progress with: watch -n 60 cat /proc/mdstat"
                        else
                            log "RAID is fully synced and operational"
                        fi

                        # Show disk usage
                        log "Current disk usage:"
                        df -h "$mount_point"

                        exit 0
                    fi
                else
                    # /nsm not mounted, mount the RAID array
                    log "Mounting RAID array $array_path at $mount_point"
                    mount "$array_path" "$mount_point"

                    # Update fstab
                    log "Updating fstab for RAID array"
                    sed -i "\|${array_path}|d" /etc/fstab
                    echo "${array_path} ${mount_point} xfs defaults,nofail 0 0" >> /etc/fstab

                    log "RAID array is now mounted at $mount_point"
                    log "Current RAID details:"
                    mdadm --detail "$array_path"

                    # Show disk usage
                    log "Current disk usage:"
                    df -h "$mount_point"

                    exit 0
                fi
            fi
        done
    fi

    # Check if any of the target devices are in use
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        if mdadm --examine "$device" &>/dev/null || mdadm --examine "${device}p1" &>/dev/null; then
            # Find the actual array name for this device
            local device_arrays=($(find_md_arrays_using_devices "${device}p1"))
            local array_name=""

            if [ ${#device_arrays[@]} -gt 0 ]; then
                array_name="${device_arrays[0]}"
            else
                # Fallback: try to find array name from /proc/mdstat
                local partition_name="${device##*/}p1"
                array_name=$(grep -l "$partition_name" /proc/mdstat 2>/dev/null | head -1)
                if [ -n "$array_name" ]; then
                    array_name=$(grep "^md[0-9]" /proc/mdstat | grep "$partition_name" | awk '{print "/dev/" $1}' | head -1)
                fi
                # Final fallback
                if [ -z "$array_name" ]; then
                    array_name="$RAID_DEVICE"
                fi
            fi

            log "Error: $device appears to be part of an existing RAID array"
            log "Old RAID metadata detected but array is not running."
            log ""
            log "To fix this, run the script with --force-cleanup:"
            log "  sudo $0 --force-cleanup"
            log ""
            log "Or manually clean up with:"
            log "1. Stop any arrays: mdadm --stop --scan"
            log "2. Zero superblocks: mdadm --zero-superblock --force ${device}p1"
            log "3. Wipe signatures: wipefs -af $device"
            exit 1
        fi
    done
}

# Function to ensure devices are not in use
ensure_devices_free() {
    local device=$1

    log "Cleaning up device $device"

    # Kill any processes using the device
    fuser -k "${device}"* 2>/dev/null || true

    # Force unmount any partitions
    for part in "${device}"*; do
        if mount | grep -q "$part"; then
            umount -f "$part" 2>/dev/null || true
        fi
    done

    # Stop any MD arrays using this device
    for md in $(ls /dev/md* 2>/dev/null || true); do
        if mdadm --detail "$md" 2>/dev/null | grep -q "$device"; then
            mdadm --stop "$md" 2>/dev/null || true
        fi
    done

    # Clear MD superblock
    mdadm --zero-superblock --force "${device}"* 2>/dev/null || true

    # Remove LVM PV if exists
    pvremove -ff -y "$device" 2>/dev/null || true

    # Clear all signatures
    wipefs -af "$device" 2>/dev/null || true

    # Delete partition table
    dd if=/dev/zero of="$device" bs=512 count=2048 2>/dev/null || true
    dd if=/dev/zero of="$device" bs=512 seek=$(( $(blockdev --getsz "$device") - 2048 )) count=2048 2>/dev/null || true

    # Force kernel to reread
    blockdev --rereadpt "$device" 2>/dev/null || true
    partprobe -s "$device" 2>/dev/null || true
    sleep 2
}

# Main script
main() {
    log "Starting RAID setup script"

    # Check if running as root
    check_root

    # If force cleanup flag is set, do aggressive cleanup first
    if [ "$FORCE_CLEANUP" = true ]; then
        force_cleanup_raid
    fi

    # Check for existing RAID setup
    check_existing_raid

    # Clean up any existing MD arrays
    log "Cleaning up existing MD arrays"
    mdadm --stop --scan 2>/dev/null || true

    # Clear mdadm configuration
    log "Clearing mdadm configuration"
    echo "DEVICE partitions" > /etc/mdadm.conf

    # Clean and prepare devices
    for device in "/dev/nvme0n1" "/dev/nvme1n1"; do
        ensure_devices_free "$device"

        log "Creating new partition table on $device"
        sgdisk -Z "$device"
        sgdisk -o "$device"

        log "Creating RAID partition"
        sgdisk -n 1:0:0 -t 1:fd00 "$device"

        partprobe "$device"
        udevadm settle
        sleep 5
    done

    log "Final verification of partition availability"
    if ! [ -b "/dev/nvme0n1p1" ] || ! [ -b "/dev/nvme1n1p1" ]; then
        log "Error: Partitions not available after creation"
        exit 1
    fi

    log "Creating RAID array"
    mdadm --create "$RAID_DEVICE" --level=1 --raid-devices=2 \
        --metadata=1.2 \
        /dev/nvme0n1p1 /dev/nvme1n1p1 \
        --force --run

    log "Creating XFS filesystem"
    mkfs.xfs -f "$RAID_DEVICE"

    log "Creating mount point"
    mkdir -p /nsm

    log "Updating fstab"
    sed -i "\|${RAID_DEVICE}|d" /etc/fstab
    echo "${RAID_DEVICE} ${MOUNT_POINT} xfs defaults,nofail 0 0" >> /etc/fstab

    log "Reloading systemd daemon"
    systemctl daemon-reload

    log "Mounting filesystem"
    mount -a

    log "Saving RAID configuration"
    mdadm --detail --scan > /etc/mdadm.conf

    log "RAID setup complete"
    log "RAID array details:"
    mdadm --detail "$RAID_DEVICE"

    if grep -q "resync" /proc/mdstat; then
        log "RAID is currently resyncing. You can monitor progress with:"
        log "watch -n 60 cat /proc/mdstat"
    fi
}

# Run main function
main "$@"
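Once the script above finishes (or exits early because the array already exists), the result can be verified independently with the same read-only checks it logs. A minimal sketch:

# Array health, member partitions, and sync state.
sudo mdadm --detail /dev/md0

# Kernel view, including resync progress if one is running.
cat /proc/mdstat

# Confirm /nsm is served by the array and will persist across reboots.
findmnt -n -o SOURCE,FSTYPE /nsm
grep '/nsm' /etc/fstab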
424
salt/hypervisor/tools/sbin/so-qcow2-network-predictable
Normal file
424
salt/hypervisor/tools/sbin/so-qcow2-network-predictable
Normal file
@@ -0,0 +1,424 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
"""
|
||||
Script for configuring network interface predictability in Security Onion VMs.
|
||||
This script modifies the necessary files to ensure consistent network interface naming.
|
||||
|
||||
The script performs the following operations:
|
||||
1. Modifies the BLS entry to set net.ifnames=1
|
||||
2. Removes any existing persistent network rules
|
||||
3. Updates GRUB configuration
|
||||
|
||||
**Usage:**
|
||||
so-qcow2-network-predictable -n <domain_name> [-I <qcow2_image_path>]
|
||||
|
||||
**Options:**
|
||||
-n, --name Domain name of the VM to configure
|
||||
-I, --image (Optional) Path to the QCOW2 image. If not provided,
|
||||
defaults to /nsm/libvirt/images/<domain_name>/<domain_name>.qcow2
|
||||
|
||||
**Examples:**
|
||||
|
||||
1. **Configure using domain name:**
|
||||
```bash
|
||||
so-qcow2-network-predictable -n sool9
|
||||
```
|
||||
This command will:
|
||||
- Use default image path: /nsm/libvirt/images/sool9/sool9.qcow2
|
||||
- Configure network interface predictability
|
||||
|
||||
2. **Configure using custom image path:**
|
||||
```bash
|
||||
so-qcow2-network-predictable -n sool9 -I /path/to/custom/image.qcow2
|
||||
```
|
||||
This command will:
|
||||
- Use the specified image path
|
||||
- Configure network interface predictability
|
||||
|
||||
**Notes:**
|
||||
- The VM must not be running when executing this script
|
||||
- Requires root privileges
|
||||
- Will automatically find and modify the appropriate BLS entry
|
||||
- Removes /etc/udev/rules.d/70-persistent-net.rules if it exists
|
||||
- Updates GRUB configuration after changes
|
||||
|
||||
**Exit Codes:**
|
||||
- 0: Success
|
||||
- 1: General error (invalid arguments, file operations, etc.)
|
||||
- 2: VM is running
|
||||
- 3: Required files not found
|
||||
- 4: Permission denied
|
||||
|
||||
**Logging:**
|
||||
- Logs are written to /opt/so/log/hypervisor/so-qcow2-network-predictable.log
|
||||
- Both file and console logging are enabled
|
||||
- Log entries include:
|
||||
- Timestamps
|
||||
- Operation details
|
||||
- Error messages
|
||||
- Configuration changes
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import guestfs
|
||||
import glob
|
||||
import libvirt
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
from so_logging_utils import setup_logging
|
||||
|
||||
# Set up logging
|
||||
logger = setup_logging(
|
||||
logger_name='so-qcow2-network-predictable',
|
||||
log_file_path='/opt/so/log/hypervisor/so-qcow2-network-predictable.log',
|
||||
log_level=logging.INFO,
|
||||
format_str='%(asctime)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
|
||||
def check_domain_status(domain_name):
|
||||
"""
|
||||
Check if the specified domain exists and is not running.
|
||||
|
||||
Args:
|
||||
domain_name (str): Name of the libvirt domain to check
|
||||
|
||||
Returns:
|
||||
bool: True if domain exists and is not running, False otherwise
|
||||
|
||||
Raises:
|
||||
RuntimeError: If domain is running or connection to libvirt fails
|
||||
"""
|
||||
try:
|
||||
conn = libvirt.open('qemu:///system')
|
||||
try:
|
||||
dom = conn.lookupByName(domain_name)
|
||||
is_running = dom.isActive()
|
||||
if is_running:
|
||||
logger.error(f"Domain '{domain_name}' is running - cannot modify configuration")
|
||||
raise RuntimeError(f"Domain '{domain_name}' must not be running")
|
||||
logger.info(f"Domain '{domain_name}' exists and is not running")
|
||||
return True
|
||||
except libvirt.libvirtError as e:
|
||||
if "no domain with matching name" in str(e):
|
||||
logger.error(f"Domain '{domain_name}' not found")
|
||||
raise RuntimeError(f"Domain '{domain_name}' not found")
|
||||
raise
|
||||
finally:
|
||||
conn.close()
|
||||
except libvirt.libvirtError as e:
|
||||
logger.error(f"Failed to connect to libvirt: {e}")
|
||||
raise RuntimeError(f"Failed to connect to libvirt: {e}")
|
||||
|
||||
def modify_bls_entry(g):
|
||||
"""
|
||||
Find and modify the BLS entry to set net.ifnames=1.
|
||||
|
||||
Args:
|
||||
g: Mounted guestfs handle
|
||||
|
||||
Returns:
|
||||
bool: True if successful, False if no changes needed
|
||||
|
||||
Raises:
|
||||
RuntimeError: If BLS entry cannot be found or modified
|
||||
"""
|
||||
bls_dir = "/boot/loader/entries"
|
||||
logger.info(f"Checking BLS directory: {bls_dir}")
|
||||
if g.is_dir(bls_dir):
|
||||
logger.info("BLS directory exists")
|
||||
else:
|
||||
logger.info("Listing /boot contents:")
|
||||
try:
|
||||
boot_contents = g.ls("/boot")
|
||||
logger.info(f"/boot contains: {boot_contents}")
|
||||
if g.is_dir("/boot/loader"):
|
||||
logger.info("Listing /boot/loader contents:")
|
||||
loader_contents = g.ls("/boot/loader")
|
||||
logger.info(f"/boot/loader contains: {loader_contents}")
|
||||
except Exception as e:
|
||||
logger.error(f"Error listing /boot contents: {e}")
|
||||
raise RuntimeError(f"BLS directory not found: {bls_dir}")
|
||||
|
||||
# Find BLS entry file
|
||||
entries = g.glob_expand(f"{bls_dir}/*.conf")
|
||||
logger.info(f"Found BLS entries: {entries}")
|
||||
if not entries:
|
||||
logger.error("No BLS entry files found")
|
||||
raise RuntimeError("No BLS entry files found")
|
||||
|
||||
# Use the first entry found
|
||||
bls_file = entries[0]
|
||||
logger.info(f"Found BLS entry file: {bls_file}")
|
||||
|
||||
try:
|
||||
logger.info(f"Reading BLS file contents from: {bls_file}")
|
||||
content = g.read_file(bls_file).decode('utf-8')
|
||||
logger.info("Current BLS file content:")
|
||||
logger.info("---BEGIN BLS CONTENT---")
|
||||
logger.info(content)
|
||||
logger.info("---END BLS CONTENT---")
|
||||
|
||||
lines = content.splitlines()
|
||||
modified = False
|
||||
|
||||
for i, line in enumerate(lines):
|
||||
if line.startswith('options '):
|
||||
logger.info(f"Found options line: {line}")
|
||||
|
||||
# First remove any existing net.ifnames parameters (both =0 and =1)
|
||||
new_line = re.sub(r'\s*net\.ifnames=[01]\s*', ' ', line)
|
||||
# Also remove any quoted versions
|
||||
new_line = re.sub(r'\s*"net\.ifnames=[01]"\s*', ' ', new_line)
|
||||
# Clean up multiple spaces
|
||||
new_line = re.sub(r'\s+', ' ', new_line).strip()
|
||||
|
||||
# Now add net.ifnames=1 at the end
|
||||
new_line = f"{new_line} net.ifnames=1"
|
||||
|
||||
if new_line != line:
|
||||
lines[i] = new_line
|
||||
modified = True
|
||||
logger.info(f"Updated options line. New line: {new_line}")
|
||||
break
|
||||
|
||||
if modified:
|
||||
new_content = '\n'.join(lines) + '\n'
|
||||
logger.info("New BLS file content:")
|
||||
logger.info("---BEGIN NEW BLS CONTENT---")
|
||||
logger.info(new_content)
|
||||
logger.info("---END NEW BLS CONTENT---")
|
||||
g.write(bls_file, new_content.encode('utf-8'))
|
||||
logger.info("Successfully updated BLS entry")
|
||||
return True
|
||||
|
||||
logger.info("No changes needed for BLS entry")
|
||||
return False
|
||||
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to modify BLS entry: {e}")
|
||||
raise RuntimeError(f"Failed to modify BLS entry: {e}")
|
||||
|
||||
def remove_persistent_net_rules(g):
|
||||
"""
|
||||
Remove the persistent network rules file if it exists.
|
||||
|
||||
Args:
|
||||
g: Mounted guestfs handle
|
||||
|
||||
Returns:
|
||||
bool: True if file was removed, False if it didn't exist
|
||||
"""
|
||||
rules_file = "/etc/udev/rules.d/70-persistent-net.rules"
|
||||
logger.info(f"Checking for persistent network rules file: {rules_file}")
|
||||
try:
|
||||
if g.is_file(rules_file):
|
||||
logger.info("Found persistent network rules file, removing...")
|
||||
g.rm(rules_file)
|
||||
logger.info(f"Successfully removed persistent network rules file: {rules_file}")
|
||||
return True
|
||||
logger.info("No persistent network rules file found")
|
||||
return False
|
||||
except Exception as e:
|
||||
logger.error(f"Failed to remove persistent network rules: {e}")
|
||||
raise RuntimeError(f"Failed to remove persistent network rules: {e}")
|
||||
|
||||
def update_grub_config(g):
|
||||
"""
|
||||
Update GRUB configuration.
|
||||
|
||||
Args:
|
||||
g: Mounted guestfs handle
|
||||
|
||||
Raises:
|
||||
RuntimeError: If GRUB update fails
|
||||
"""
|
||||
try:
|
||||
# First, read the current grubenv to get the existing kernelopts
|
||||
logger.info("Reading current grubenv...")
|
||||
grubenv_content = g.read_file('/boot/grub2/grubenv').decode('utf-8')
|
||||
logger.info("Current grubenv content:")
|
||||
logger.info(grubenv_content)
|
||||
|
||||
# Extract current kernelopts
|
||||
kernelopts_match = re.search(r'^kernelopts="([^"]+)"', grubenv_content, re.MULTILINE)
|
||||
if kernelopts_match:
|
||||
current_kernelopts = kernelopts_match.group(1)
|
||||
logger.info(f"Current kernelopts: {current_kernelopts}")
|
||||
|
||||
# Remove any existing net.ifnames parameters
|
||||
new_kernelopts = re.sub(r'\s*net\.ifnames=[01]\s*', ' ', current_kernelopts)
|
||||
# Clean up multiple spaces
|
||||
new_kernelopts = re.sub(r'\s+', ' ', new_kernelopts).strip()
|
||||
# Add net.ifnames=1
|
||||
new_kernelopts = f"{new_kernelopts} net.ifnames=1"

            logger.info(f"New kernelopts: {new_kernelopts}")

            # Update grubenv with the new kernelopts
            logger.info("Setting kernelopts with net.ifnames=1...")
            output_editenv = g.command(['grub2-editenv', '-', 'set', f'kernelopts={new_kernelopts}'])
            logger.info("grub2-editenv output:")
            logger.info(output_editenv)
        else:
            # If we can't find existing kernelopts, use the default
            logger.warning("Could not find existing kernelopts, using default")
            output_editenv = g.command(['grub2-editenv', '-', 'set', 'kernelopts=console=tty0 no_timer_check biosdevname=0 resume=/dev/mapper/vg_main-lv_swap rd.lvm.lv=vg_main/lv_root rd.lvm.lv=vg_main/lv_swap net.ifnames=1 crashkernel=1G-64G:448M,64G-:512M'])
            logger.info("grub2-editenv output:")
            logger.info(output_editenv)

        logger.info("Updating grubby with net.ifnames=1...")
        # First remove any existing net.ifnames arguments
        output_grubby_remove = g.command(['grubby', '--update-kernel=ALL', '--remove-args=net.ifnames=0 net.ifnames=1'])
        logger.info("grubby remove output:")
        logger.info(output_grubby_remove)

        # Then add net.ifnames=1
        output_grubby_add = g.command(['grubby', '--update-kernel=ALL', '--args=net.ifnames=1'])
        logger.info("grubby add output:")
        logger.info(output_grubby_add)

        logger.info("Updating GRUB configuration...")
        output_mkconfig = g.command(['grub2-mkconfig', '-o', '/boot/grub2/grub.cfg'])
        logger.info("GRUB update output:")
        logger.info(output_mkconfig)
        logger.info("Successfully updated GRUB configuration")
    except Exception as e:
        logger.error(f"Failed to update GRUB configuration: {e}")
        raise RuntimeError(f"Failed to update GRUB configuration: {e}")


def configure_network_predictability(domain_name, image_path=None):
    """
    Configure network interface predictability for a VM.

    Args:
        domain_name (str): Name of the domain to configure
        image_path (str, optional): Path to the QCOW2 image

    Raises:
        RuntimeError: If configuration fails
    """
    # Check domain status
    check_domain_status(domain_name)

    # Use default image path if none provided
    if not image_path:
        image_path = f"/nsm/libvirt/images/{domain_name}/{domain_name}.qcow2"

    if not os.path.exists(image_path):
        logger.error(f"Image file not found: {image_path}")
        raise RuntimeError(f"Image file not found: {image_path}")

    if not os.access(image_path, os.R_OK | os.W_OK):
        logger.error(f"Permission denied: Cannot access image file {image_path}")
        raise RuntimeError(f"Permission denied: Cannot access image file {image_path}")

    logger.info(f"Configuring network predictability for domain: {domain_name}")
    logger.info(f"Using image: {image_path}")

    g = guestfs.GuestFS(python_return_dict=True)
    try:
        logger.info("Initializing guestfs...")
        g.set_network(False)
        g.selinux = False
        g.add_drive_opts(image_path, format="qcow2")
        g.launch()

        logger.info("Inspecting operating system...")
        roots = g.inspect_os()
        if not roots:
            raise RuntimeError("No operating system found in image")

        root = roots[0]
        logger.info(f"Found root filesystem: {root}")
        logger.info(f"Operating system type: {g.inspect_get_type(root)}")
        logger.info(f"Operating system distro: {g.inspect_get_distro(root)}")
        logger.info(f"Operating system major version: {g.inspect_get_major_version(root)}")
        logger.info(f"Operating system minor version: {g.inspect_get_minor_version(root)}")

        logger.info("Getting mount points...")
        mountpoints = g.inspect_get_mountpoints(root)
        logger.info(f"Found mount points: {mountpoints}")
        logger.info("Converting mount points to sortable list...")
        # Convert dictionary to list of tuples
        mountpoints = list(mountpoints.items())
        logger.info(f"Converted mount points: {mountpoints}")
        logger.info("Sorting mount points by path length for proper mount order...")
        mountpoints.sort(key=lambda m: len(m[0]))
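        # e.g. '/' sorts before '/boot', which sorts before '/boot/efi',
        # so parent filesystems are always mounted before their children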
        logger.info(f"Mount order will be: {[mp[0] for mp in mountpoints]}")

        for mp_path, mp_device in mountpoints:
            try:
                logger.info(f"Attempting to mount {mp_device} at {mp_path}")
                g.mount(mp_device, mp_path)
                logger.info(f"Successfully mounted {mp_device} at {mp_path}")
            except Exception as e:
                logger.warning(f"Could not mount {mp_device} at {mp_path}: {str(e)}")
                # Continue with other mounts

        # Perform configuration steps
        bls_modified = modify_bls_entry(g)
        rules_removed = remove_persistent_net_rules(g)

        if bls_modified or rules_removed:
            update_grub_config(g)
            logger.info("Network predictability configuration completed successfully")
        else:
            logger.info("No changes were necessary")

    except Exception as e:
        raise RuntimeError(f"Failed to configure network predictability: {e}")
    finally:
        try:
            logger.info("Unmounting all filesystems...")
            g.umount_all()
            logger.info("Successfully unmounted all filesystems")
        except Exception as e:
            logger.warning(f"Error unmounting filesystems: {e}")
        g.close()


def parse_arguments():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser(
        description="Configure network interface predictability for Security Onion VMs"
    )
    parser.add_argument("-n", "--name", required=True,
                        help="Domain name of the VM to configure")
    parser.add_argument("-I", "--image",
                        help="Path to the QCOW2 image (optional)")
    return parser.parse_args()


def main():
    """Main entry point for the script."""
    try:
        args = parse_arguments()
        configure_network_predictability(args.name, args.image)
        sys.exit(0)
    except RuntimeError as e:
        if "must not be running" in str(e):
            logger.error(str(e))
            sys.exit(2)
        elif "not found" in str(e):
            logger.error(str(e))
            sys.exit(3)
        elif "Permission denied" in str(e):
            logger.error(str(e))
            sys.exit(4)
        else:
            logger.error(str(e))
            sys.exit(1)
    except KeyboardInterrupt:
        logger.error("Operation cancelled by user")
        sys.exit(1)
    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
206
salt/hypervisor/tools/sbin/so-wait-cloud-init
Normal file
@@ -0,0 +1,206 @@
#!/usr/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

"""
Script for waiting for cloud-init to complete on a Security Onion VM.
Monitors VM state to ensure proper cloud-init initialization and shutdown.

**Usage:**
    so-wait-cloud-init -n <domain_name>

**Options:**
    -n, --name  Domain name of the VM to monitor

**Exit Codes:**
- 0: Success (cloud-init completed and VM shut down)
- 1: General error
- 2: VM never started
- 3: VM stopped too quickly
- 4: VM failed to shut down
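
**Example:**
    # e.g., monitoring a hypothetical domain named vm1_sensor
    so-wait-cloud-init -n vm1_sensor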

**Description:**
This script monitors a VM's state to ensure proper cloud-init initialization and completion:
1. Waits for the VM to start running
2. Verifies the VM remains running (not an immediate crash)
3. Waits for the VM to shut down (indicating cloud-init completion)
4. Verifies the VM remains shut down

The script is typically used in the libvirt.images state after creating a new VM
to ensure cloud-init completes its initialization before proceeding with further
configuration.

**Logging:**
- Logs are written to /opt/so/log/hypervisor/so-wait-cloud-init.log
- Both file and console logging are enabled
- Log entries include:
  - Timestamps
  - State changes
  - Error conditions
  - Verification steps
"""

import argparse
import logging
import subprocess
import sys
import time

from so_logging_utils import setup_logging

# Set up logging
logger = setup_logging(
    logger_name='so-wait-cloud-init',
    log_file_path='/opt/so/log/hypervisor/so-wait-cloud-init.log',
    log_level=logging.INFO,
    format_str='%(asctime)s - %(levelname)s - %(message)s'
)


def check_vm_running(domain_name):
    """
    Check if VM is in running state.

    Args:
        domain_name (str): Name of the domain to check

    Returns:
        bool: True if VM is running, False otherwise
    """
    try:
        result = subprocess.run(['virsh', 'list', '--state-running', '--name'],
                                capture_output=True, text=True, check=True)
        return domain_name in result.stdout.splitlines()
    except subprocess.CalledProcessError as e:
        logger.error(f"Failed to check VM state: {e}")
        return False


def wait_for_vm_start(domain_name, timeout=300):
    """
    Wait for VM to start running.

    Args:
        domain_name (str): Name of the domain to monitor
        timeout (int): Maximum time to wait in seconds

    Returns:
        bool: True if VM started, False if timeout occurred
    """
    logger.info(f"Waiting for VM {domain_name} to start...")
    start_time = time.time()

    while time.time() - start_time < timeout:
        if check_vm_running(domain_name):
            logger.info("VM is running")
            return True
        time.sleep(1)

    logger.error(f"Timeout waiting for VM {domain_name} to start")
    return False


def verify_vm_running(domain_name):
    """
    Verify VM remains running after initial start.

    Args:
        domain_name (str): Name of the domain to verify

    Returns:
        bool: True if VM is still running after verification period
    """
    logger.info("Verifying VM remains running...")
    time.sleep(5)  # Wait to ensure VM is stable

    if not check_vm_running(domain_name):
        logger.error("VM stopped too quickly after starting")
        return False

    logger.info("VM verified running")
    return True


def wait_for_vm_shutdown(domain_name, timeout=600):
    """
    Wait for the VM to shut down.

    Args:
        domain_name (str): Name of the domain to monitor
        timeout (int): Maximum time to wait in seconds

    Returns:
        bool: True if the VM shut down, False if timeout occurred
    """
    logger.info("Waiting for cloud-init to complete and VM to shut down...")
    start_time = time.time()
    check_count = 0

    while time.time() - start_time < timeout:
        if not check_vm_running(domain_name):
            logger.info("VM has shut down")
            return True

        # Log status every minute (after 12 checks at 5 second intervals)
        check_count += 1
        if check_count % 12 == 0:
            elapsed = int(time.time() - start_time)
            logger.info(f"Still waiting for cloud-init... ({elapsed} seconds elapsed)")

        time.sleep(5)

    logger.error(f"Timeout waiting for VM {domain_name} to shut down")
    return False


def verify_vm_shutdown(domain_name):
    """
    Verify the VM remains shut down.

    Args:
        domain_name (str): Name of the domain to verify

    Returns:
        bool: True if VM remains shut down after verification period
    """
    logger.info("Verifying VM remains shut down...")
    time.sleep(5)  # Wait to ensure VM state is stable

    if check_vm_running(domain_name):
        logger.error("VM is still running after shutdown check")
        return False

    logger.info("VM verified shut down")
    return True


def main():
    parser = argparse.ArgumentParser(
        description="Wait for cloud-init to complete on a Security Onion VM"
    )
    parser.add_argument("-n", "--name", required=True,
                        help="Domain name of the VM to monitor")
    args = parser.parse_args()

    try:
        # Wait for VM to start
        if not wait_for_vm_start(args.name):
            sys.exit(2)  # VM never started

        # Verify VM remains running
        if not verify_vm_running(args.name):
            sys.exit(3)  # VM stopped too quickly

        # Wait for VM to shut down
        if not wait_for_vm_shutdown(args.name):
            sys.exit(4)  # VM failed to shut down

        # Verify VM remains shut down
        if not verify_vm_shutdown(args.name):
            sys.exit(4)  # VM failed to stay shut down

        logger.info("Cloud-init completed successfully")
        sys.exit(0)

    except Exception as e:
        logger.error(f"Unexpected error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()
60
salt/hypervisor/tools/sbin/so_vm_utils.py
Normal file
@@ -0,0 +1,60 @@
#!/usr/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
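
"""
Shared helpers for stopping and starting libvirt domains.

A typical caller might use these roughly as follows (sketch; see
so-kvm-create-volume for a real example):

    conn = libvirt.open(None)
    dom = stop_vm(conn, 'vm1_sensor', logger)
    # ...modify the domain XML...
    start_vm(dom, logger)
"""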

import sys
import time
import libvirt
import logging


def stop_vm(conn, vm_name, logger):
    """
    Stops the specified virtual machine if it is running.

    Parameters:
        conn (libvirt.virConnect): The libvirt connection object.
        vm_name (str): The name of the virtual machine.
        logger (logging.Logger): The logger object.

    Returns:
        libvirt.virDomain: The domain object of the VM.

    Raises:
        SystemExit: If the VM cannot be found or an error occurs.
    """
    try:
        dom = conn.lookupByName(vm_name)
        if dom.isActive():
            logger.info(f"Shutting down VM '{vm_name}'...")
            dom.shutdown()
            # Wait for the VM to shut down
            while dom.isActive():
                time.sleep(1)
            logger.info(f"VM '{vm_name}' has been stopped.")
        else:
            logger.info(f"VM '{vm_name}' is already stopped.")
        return dom
    except libvirt.libvirtError as e:
        logger.error(f"Failed to stop VM '{vm_name}': {e}")
        sys.exit(1)


def start_vm(dom, logger):
    """
    Starts the specified virtual machine.

    Parameters:
        dom (libvirt.virDomain): The domain object of the VM.
        logger (logging.Logger): The logger object.

    Raises:
        SystemExit: If the VM cannot be started.
    """
    try:
        dom.create()
        logger.info(f"VM '{dom.name()}' started successfully.")
    except libvirt.libvirtError as e:
        logger.error(f"Failed to start VM '{dom.name()}': {e}")
        sys.exit(1)
586
salt/hypervisor/tools/sbin_jinja/so-kvm-create-volume
Normal file
@@ -0,0 +1,586 @@
#!/usr/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{% if 'vrt' in salt['pillar.get']('features', []) %}

"""
Script for creating and attaching virtual volumes to KVM virtual machines for NSM storage.
This script provides functionality to create pre-allocated raw disk images and attach them
to VMs as virtio-blk devices for high-performance network security monitoring data storage.

The script handles the complete volume lifecycle:
1. Volume Creation: Creates pre-allocated raw disk images using qemu-img
2. Volume Attachment: Attaches volumes to VMs as virtio-blk devices
3. VM Management: Stops/starts VMs as needed during the process

This script is designed to work with Security Onion's virtualization infrastructure and is typically
used during VM provisioning to add dedicated NSM storage volumes.

**Usage:**
    so-kvm-create-volume -v <vm_name> -s <size_gb> [-S]

**Options:**
    -v, --vm     Name of the virtual machine to attach the volume to (required).
    -s, --size   Size of the volume in GB (required, must be a positive integer).
    -S, --start  Start the VM after volume creation and attachment (optional).

**Examples:**

1. **Create and Attach 500GB Volume:**

    ```bash
    so-kvm-create-volume -v vm1_sensor -s 500
    ```

    This command creates and attaches a volume with the following settings:
    - VM Name: `vm1_sensor`
    - Volume Size: `500` GB
    - Volume Path: `/nsm/libvirt/volumes/vm1_sensor-nsm.img`
    - Device: `/dev/vdb` (virtio-blk)
    - VM remains stopped after attachment

2. **Create Volume and Start VM:**

    ```bash
    so-kvm-create-volume -v vm2_sensor -s 1000 -S
    ```

    This command creates a volume and starts the VM:
    - VM Name: `vm2_sensor`
    - Volume Size: `1000` GB (1 TB)
    - VM is started after volume attachment due to the `-S` flag

3. **Create Large Volume for Heavy Traffic:**

    ```bash
    so-kvm-create-volume -v vm3_sensor -s 2000 -S
    ```

    This command creates a large volume for high-traffic environments:
    - VM Name: `vm3_sensor`
    - Volume Size: `2000` GB (2 TB)
    - VM is started after attachment

**Notes:**

- The script automatically stops the VM if it's running before creating and attaching the volume.
- Volumes are created with full pre-allocation for optimal performance.
- Volume files are stored in `/nsm/libvirt/volumes/` with naming pattern `<vm_name>-nsm.img`.
- Volumes are attached as `/dev/vdb` using virtio-blk for high performance.
- The script checks available disk space before creating the volume.
- Ownership is set to `qemu:qemu` with permissions `640`.
- Without the `-S` flag, the VM remains stopped after volume attachment.

**Description:**

The `so-kvm-create-volume` script creates and attaches NSM storage volumes using the following process:

1. **Pre-flight Checks:**
   - Validates input parameters (VM name, size)
   - Checks available disk space in `/nsm/libvirt/volumes/`
   - Ensures sufficient space for the requested volume size

2. **VM State Management:**
   - Connects to the local libvirt daemon
   - Stops the VM if it's currently running
   - Retrieves current VM configuration

3. **Volume Creation:**
   - Creates volume directory if it doesn't exist
   - Uses `qemu-img create` with full pre-allocation
   - Sets proper ownership (qemu:qemu) and permissions (640)
   - Validates volume creation success

4. **Volume Attachment:**
   - Modifies VM's libvirt XML configuration
   - Adds disk element with virtio-blk driver
   - Configures cache='none' and io='native' for performance
   - Attaches volume as `/dev/vdb`

5. **VM Redefinition:**
   - Applies the new configuration by redefining the VM
   - Optionally starts the VM if requested
   - Emits deployment status events for monitoring

6. **Error Handling:**
   - Validates all input parameters
   - Checks disk space before creation
   - Handles volume creation failures
   - Handles volume attachment failures
   - Provides detailed error messages for troubleshooting

**Exit Codes:**

- `0`: Success
- `1`: An error occurred during execution

**Logging:**

- Logs are written to `/opt/so/log/hypervisor/so-kvm-create-volume.log`
- Both file and console logging are enabled for real-time monitoring
- Log entries include timestamps and severity levels
- Log prefixes: VOLUME:, VM:, HARDWARE:, SPACE:
- Detailed error messages are logged for troubleshooting
"""

import argparse
import sys
import os
import libvirt
import logging
import socket
import subprocess
import pwd
import grp
import xml.etree.ElementTree as ET
from io import StringIO
from so_vm_utils import start_vm, stop_vm
from so_logging_utils import setup_logging

# Get hypervisor name from local hostname
HYPERVISOR = socket.gethostname()

# Volume storage directory
VOLUME_DIR = '/nsm/libvirt/volumes'


# Custom exception classes
class InsufficientSpaceError(Exception):
    """Raised when there is insufficient disk space for volume creation."""
    pass


class VolumeCreationError(Exception):
    """Raised when volume creation fails."""
    pass


class VolumeAttachmentError(Exception):
    """Raised when volume attachment fails."""
    pass


# Custom log handler to capture output
class StringIOHandler(logging.Handler):
    def __init__(self):
        super().__init__()
        self.strio = StringIO()

    def emit(self, record):
        msg = self.format(record)
        self.strio.write(msg + '\n')

    def get_value(self):
        return self.strio.getvalue()


def parse_arguments():
    """Parse command-line arguments."""
    parser = argparse.ArgumentParser(description='Create and attach a virtual volume to a KVM virtual machine for NSM storage.')
    parser.add_argument('-v', '--vm', required=True, help='Name of the virtual machine to attach the volume to.')
    parser.add_argument('-s', '--size', type=int, required=True, help='Size of the volume in GB (must be a positive integer).')
    parser.add_argument('-S', '--start', action='store_true', help='Start the VM after volume creation and attachment.')
    args = parser.parse_args()

    # Validate size is positive
    if args.size <= 0:
        parser.error("Volume size must be a positive integer.")

    return args


def check_disk_space(size_gb, logger):
    """
    Check if there is sufficient disk space available for volume creation.

    Args:
        size_gb: Size of the volume in GB
        logger: Logger instance

    Raises:
        InsufficientSpaceError: If there is not enough disk space
    """
    try:
        stat = os.statvfs(VOLUME_DIR)
        # Available space in bytes
        available_bytes = stat.f_bavail * stat.f_frsize
        # Required space in bytes (add 10% buffer)
        required_bytes = size_gb * 1024 * 1024 * 1024 * 1.1
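        # e.g. a 500 GB request must find roughly 550 GB of free space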

        available_gb = available_bytes / (1024 * 1024 * 1024)
        required_gb = required_bytes / (1024 * 1024 * 1024)

        logger.info(f"SPACE: Available: {available_gb:.2f} GB, Required: {required_gb:.2f} GB")

        if available_bytes < required_bytes:
            raise InsufficientSpaceError(
                f"Insufficient disk space. Available: {available_gb:.2f} GB, Required: {required_gb:.2f} GB"
            )

        logger.info(f"SPACE: Sufficient disk space available for {size_gb} GB volume")

    except OSError as e:
        logger.error(f"SPACE: Failed to check disk space: {e}")
        raise


def create_volume_file(vm_name, size_gb, logger):
    """
    Create a pre-allocated raw disk image for the VM.

    Args:
        vm_name: Name of the VM
        size_gb: Size of the volume in GB
        logger: Logger instance

    Returns:
        Path to the created volume file

    Raises:
        VolumeCreationError: If volume creation fails
    """
    # Define volume path (directory already created in main())
    volume_path = os.path.join(VOLUME_DIR, f"{vm_name}-nsm.img")

    # Check if volume already exists
    if os.path.exists(volume_path):
        logger.error(f"VOLUME: Volume already exists: {volume_path}")
        raise VolumeCreationError(f"Volume already exists: {volume_path}")

    logger.info(f"VOLUME: Creating {size_gb} GB volume at {volume_path}")

    # Create volume using qemu-img with full pre-allocation
    try:
        cmd = [
            'qemu-img', 'create',
            '-f', 'raw',
            '-o', 'preallocation=full',
            volume_path,
            f"{size_gb}G"
        ]

        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            check=True
        )

        logger.info("VOLUME: Volume created successfully")
        if result.stdout:
            logger.debug(f"VOLUME: qemu-img output: {result.stdout.strip()}")

    except subprocess.CalledProcessError as e:
        logger.error(f"VOLUME: Failed to create volume: {e}")
        if e.stderr:
            logger.error(f"VOLUME: qemu-img error: {e.stderr.strip()}")
        raise VolumeCreationError(f"Failed to create volume: {e}")

    # Set ownership to qemu:qemu
    try:
        qemu_uid = pwd.getpwnam('qemu').pw_uid
        qemu_gid = grp.getgrnam('qemu').gr_gid
        os.chown(volume_path, qemu_uid, qemu_gid)
        logger.info("VOLUME: Set ownership to qemu:qemu")
    except (KeyError, OSError) as e:
        logger.error(f"VOLUME: Failed to set ownership: {e}")
        raise VolumeCreationError(f"Failed to set ownership: {e}")

    # Set permissions to 640
    try:
        os.chmod(volume_path, 0o640)
        logger.info("VOLUME: Set permissions to 640")
    except OSError as e:
        logger.error(f"VOLUME: Failed to set permissions: {e}")
        raise VolumeCreationError(f"Failed to set permissions: {e}")

    # Verify volume was created
    if not os.path.exists(volume_path):
        logger.error(f"VOLUME: Volume file not found after creation: {volume_path}")
        raise VolumeCreationError(f"Volume file not found after creation: {volume_path}")

    volume_size = os.path.getsize(volume_path)
    logger.info(f"VOLUME: Volume created: {volume_path} ({volume_size} bytes)")

    return volume_path


def attach_volume_to_vm(conn, vm_name, volume_path, logger):
    """
    Attach the volume to the VM's libvirt XML configuration.

    Args:
        conn: Libvirt connection
        vm_name: Name of the VM
        volume_path: Path to the volume file
        logger: Logger instance

    Raises:
        VolumeAttachmentError: If volume attachment fails
    """
    try:
        # Get the VM domain
        dom = conn.lookupByName(vm_name)

        # Get the XML description of the VM
        xml_desc = dom.XMLDesc()
        root = ET.fromstring(xml_desc)

        # Find the devices element
        devices_elem = root.find('./devices')
        if devices_elem is None:
            logger.error("VM: Could not find <devices> element in XML")
            raise VolumeAttachmentError("Could not find <devices> element in VM XML")

        # Log ALL devices with PCI addresses to find conflicts
        logger.info("DISK_DEBUG: Examining ALL devices with PCI addresses")
        for device in devices_elem:
            address = device.find('./address')
            if address is not None and address.get('type') == 'pci':
                bus = address.get('bus', 'unknown')
                slot = address.get('slot', 'unknown')
                function = address.get('function', 'unknown')
                logger.info(f"DISK_DEBUG: Device {device.tag}: bus={bus}, slot={slot}, function={function}")

        # Log existing disk configuration for debugging
        logger.info("DISK_DEBUG: Examining existing disk configuration")
        existing_disks = devices_elem.findall('./disk')
        for idx, disk in enumerate(existing_disks):
            target = disk.find('./target')
            source = disk.find('./source')
            address = disk.find('./address')

            dev_name = target.get('dev') if target is not None else 'unknown'
            source_file = source.get('file') if source is not None else 'unknown'

            if address is not None:
                slot = address.get('slot', 'unknown')
                bus = address.get('bus', 'unknown')
                logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, slot={slot}, bus={bus}")
            else:
                logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, no address element")

        # Check if vdb already exists
        for disk in devices_elem.findall('./disk'):
            target = disk.find('./target')
            if target is not None and target.get('dev') == 'vdb':
                logger.error("VM: Device vdb already exists in VM configuration")
                raise VolumeAttachmentError("Device vdb already exists in VM configuration")

        logger.info(f"VM: Attaching volume to {vm_name} as /dev/vdb")

        # Create disk element
        disk_elem = ET.SubElement(devices_elem, 'disk', attrib={
            'type': 'file',
            'device': 'disk'
        })

        # Add driver element
        ET.SubElement(disk_elem, 'driver', attrib={
            'name': 'qemu',
            'type': 'raw',
            'cache': 'none',
            'io': 'native'
        })

        # Add source element
        ET.SubElement(disk_elem, 'source', attrib={
            'file': volume_path
        })

        # Add target element
        ET.SubElement(disk_elem, 'target', attrib={
            'dev': 'vdb',
            'bus': 'virtio'
        })

        # Add address element
        # Use bus 0x07 with slot 0x00 to ensure NSM volume appears after OS disk (which is on bus 0x04)
        # Bus 0x05 is used by memballoon, bus 0x06 is used by rng device
        # Libvirt requires slot 0x00 for devices on these non-zero buses
        # This ensures vda = OS disk, vdb = NSM volume
        ET.SubElement(disk_elem, 'address', attrib={
            'type': 'pci',
            'domain': '0x0000',
            'bus': '0x07',
            'slot': '0x00',
            'function': '0x0'
        })
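
        # The generated disk element is equivalent to the following XML (sketch):
        #   <disk type='file' device='disk'>
        #     <driver name='qemu' type='raw' cache='none' io='native'/>
        #     <source file='/nsm/libvirt/volumes/<vm_name>-nsm.img'/>
        #     <target dev='vdb' bus='virtio'/>
        #     <address type='pci' domain='0x0000' bus='0x07' slot='0x00' function='0x0'/>
        #   </disk>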

        logger.info("HARDWARE: Added disk configuration for vdb")

        # Log disk ordering after adding new disk
        logger.info("DISK_DEBUG: Disk configuration after adding NSM volume")
        all_disks = devices_elem.findall('./disk')
        for idx, disk in enumerate(all_disks):
            target = disk.find('./target')
            source = disk.find('./source')
            address = disk.find('./address')

            dev_name = target.get('dev') if target is not None else 'unknown'
            source_file = source.get('file') if source is not None else 'unknown'

            if address is not None:
                slot = address.get('slot', 'unknown')
                bus = address.get('bus', 'unknown')
                logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, slot={slot}, bus={bus}")
            else:
                logger.info(f"DISK_DEBUG: Disk {idx}: dev={dev_name}, source={source_file}, no address element")

        # Convert XML back to string
        new_xml_desc = ET.tostring(root, encoding='unicode')

        # Redefine the VM with the new XML
        conn.defineXML(new_xml_desc)
        logger.info("VM: VM redefined with volume attached")

    except libvirt.libvirtError as e:
        logger.error(f"VM: Failed to attach volume: {e}")
        raise VolumeAttachmentError(f"Failed to attach volume: {e}")
    except Exception as e:
        logger.error(f"VM: Failed to attach volume: {e}")
        raise VolumeAttachmentError(f"Failed to attach volume: {e}")


def emit_status_event(vm_name, status):
    """
    Emit a deployment status event.

    Args:
        vm_name: Name of the VM
        status: Status message
    """
    try:
        subprocess.run([
            'so-salt-emit-vm-deployment-status-event',
            '-v', vm_name,
            '-H', HYPERVISOR,
            '-s', status
        ], check=True)
    except subprocess.CalledProcessError:
        # Don't fail the entire operation if status event fails
        pass


def main():
    """Main function to orchestrate volume creation and attachment."""
    # Set up logging using the so_logging_utils library
    string_handler = StringIOHandler()
    string_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger = setup_logging(
        logger_name='so-kvm-create-volume',
        log_file_path='/opt/so/log/hypervisor/so-kvm-create-volume.log',
        log_level=logging.INFO,
        format_str='%(asctime)s - %(levelname)s - %(message)s'
    )
    logger.addHandler(string_handler)

    vm_name = None

    try:
        # Parse arguments
        args = parse_arguments()

        vm_name = args.vm
        size_gb = args.size
        start_vm_flag = args.start

        logger.info(f"VOLUME: Starting volume creation for VM '{vm_name}' with size {size_gb} GB")

        # Emit start status event
        emit_status_event(vm_name, 'Volume Creation')

        # Ensure volume directory exists before checking disk space
        try:
            os.makedirs(VOLUME_DIR, mode=0o754, exist_ok=True)
            qemu_uid = pwd.getpwnam('qemu').pw_uid
            qemu_gid = grp.getgrnam('qemu').gr_gid
            os.chown(VOLUME_DIR, qemu_uid, qemu_gid)
            logger.debug(f"VOLUME: Ensured volume directory exists: {VOLUME_DIR}")
        except Exception as e:
            logger.error(f"VOLUME: Failed to create volume directory: {e}")
            emit_status_event(vm_name, 'Volume Configuration Failed')
            sys.exit(1)

        # Check disk space
        check_disk_space(size_gb, logger)

        # Connect to libvirt
        try:
            conn = libvirt.open(None)
            logger.info("VM: Connected to libvirt")
        except libvirt.libvirtError as e:
            logger.error(f"VM: Failed to open connection to libvirt: {e}")
            emit_status_event(vm_name, 'Volume Configuration Failed')
            sys.exit(1)

        # Stop VM if running
        dom = stop_vm(conn, vm_name, logger)

        # Create volume file
        volume_path = create_volume_file(vm_name, size_gb, logger)

        # Attach volume to VM
        attach_volume_to_vm(conn, vm_name, volume_path, logger)

        # Start VM if -S or --start argument is provided
        if start_vm_flag:
            dom = conn.lookupByName(vm_name)
            start_vm(dom, logger)
            logger.info(f"VM: VM '{vm_name}' started successfully")
        else:
            logger.info("VM: Start flag not provided; VM will remain stopped")

        # Close connection
        conn.close()

        # Emit success status event
        emit_status_event(vm_name, 'Volume Configuration')

        logger.info(f"VOLUME: Volume creation and attachment completed successfully for VM '{vm_name}'")

    except KeyboardInterrupt:
        error_msg = "Operation cancelled by user"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

    except InsufficientSpaceError as e:
        error_msg = f"SPACE: {str(e)}"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

    except VolumeCreationError as e:
        error_msg = f"VOLUME: {str(e)}"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

    except VolumeAttachmentError as e:
        error_msg = f"VM: {str(e)}"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)

    except Exception as e:
        error_msg = f"An error occurred: {str(e)}"
        logger.error(error_msg)
        if vm_name:
            emit_status_event(vm_name, 'Volume Configuration Failed')
        sys.exit(1)


if __name__ == '__main__':
    main()

{%- else -%}

echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
for more information about purchasing a license to enable this feature."

{% endif -%}
365
salt/hypervisor/tools/sbin_jinja/so-kvm-modify-hardware
Normal file
@@ -0,0 +1,365 @@
#!/usr/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{% if 'vrt' in salt['pillar.get']('features', []) %}

"""
Script for managing hardware configurations of KVM virtual machines. This script provides
functionality to modify CPU, memory, and PCI device settings without manual XML editing
or direct libvirt interaction.

The script offers three main configuration capabilities:
1. CPU Management: Adjust virtual CPU count
2. Memory Management: Modify memory allocation
3. PCI Passthrough: Configure PCI device passthrough for direct hardware access

This script is designed to work with Security Onion's virtualization infrastructure and is typically
used during VM provisioning and hardware reconfiguration tasks.

**Usage:**
    so-kvm-modify-hardware -v <vm_name> [-c <cpu_count>] [-m <memory_amount>] [-p <pci_id>] [-p <pci_id> ...] [-s]

**Options:**
    -v, --vm      Name of the virtual machine to modify.
    -c, --cpu     Number of virtual CPUs to assign.
    -m, --memory  Amount of memory to assign in MiB.
    -p, --pci     PCI hardware ID(s) to passthrough to the VM (e.g., 0000:00:1f.2). Can be specified multiple times.
                  Format: domain:bus:device.function
    -s, --start   Start the VM after modification.

**Examples:**

1. **Modify CPU and Memory with Multiple PCI Devices:**

    ```bash
    so-kvm-modify-hardware -v vm1_sensor -c 4 -m 8192 -p 0000:c7:00.0 -p 0000:c8:00.0 -s
    ```

    This command modifies a VM with the following settings:
    - VM Name: `vm1_sensor`
    - Hardware Configuration:
      - CPUs: `4`
      - Memory: `8192` MiB
      - PCI Device Passthrough: `0000:c7:00.0`, `0000:c8:00.0`
    - The VM is started after modification due to the `-s` flag

2. **Add PCI Device Without Other Changes:**

    ```bash
    so-kvm-modify-hardware -v vm2_master -p 0000:c7:00.0
    ```

    This command adds a single PCI device passthrough to the VM:
    - VM Name: `vm2_master`
    - PCI Device: `0000:c7:00.0`
    - Existing CPU and memory settings are preserved

3. **Update Resource Allocation:**

    ```bash
    so-kvm-modify-hardware -v vm3_search -c 2 -m 4096
    ```

    This command updates only compute resources:
    - VM Name: `vm3_search`
    - CPUs: `2`
    - Memory: `4096` MiB
    - VM remains stopped after modification

4. **Add Multiple PCI Devices:**

    ```bash
    so-kvm-modify-hardware -v vm4_node -p 0000:c7:00.0 -p 0000:c4:00.0 -p 0000:c4:00.1 -s
    ```

    This command adds multiple PCI devices and starts the VM:
    - VM Name: `vm4_node`
    - PCI Devices: `0000:c7:00.0`, `0000:c4:00.0`, `0000:c4:00.1`
    - VM is started after modification

**Notes:**

- The script automatically stops the VM if it's running before making modifications.
- At least one modification option (-c, -m, or -p) should be provided.
- The PCI hardware IDs must be in the format `domain:bus:device.function` (e.g., `0000:c7:00.0`).
- Multiple PCI devices can be added by using the `-p` option multiple times.
- Without the `-s` flag, the VM remains stopped after modification.
- Existing hardware configurations are preserved if not explicitly modified.

**Description:**

The `so-kvm-modify-hardware` script modifies hardware parameters of KVM virtual machines using the following process:

1. **VM State Management:**
   - Connects to the local libvirt daemon
   - Stops the VM if it's currently running
   - Retrieves current VM configuration

2. **Hardware Configuration:**
   - Modifies CPU count if specified
   - Updates memory allocation if specified
   - Adds PCI device passthrough configurations if specified
   - All changes are made through libvirt XML configuration

3. **VM Redefinition:**
   - Applies the new configuration by redefining the VM
   - Optionally starts the VM if requested
   - Ensures clean shutdown and startup during modifications

4. **Error Handling:**
   - Validates all input parameters
   - Ensures proper XML structure
   - Provides detailed error messages for troubleshooting

**Exit Codes:**

- `0`: Success
- `1`: An error occurred during execution

**Logging:**

- Logs are written to `/opt/so/log/hypervisor/so-kvm-modify-hardware.log`
- Both file and console logging are enabled for real-time monitoring
- Log entries include timestamps and severity levels
- Detailed error messages are logged for troubleshooting
"""

import argparse
import sys
import libvirt
import logging
import socket
import xml.etree.ElementTree as ET
from io import StringIO
from so_vm_utils import start_vm, stop_vm
from so_logging_utils import setup_logging
import subprocess

# Get hypervisor name from local hostname
HYPERVISOR = socket.gethostname()


# Custom log handler to capture output
class StringIOHandler(logging.Handler):
    def __init__(self):
        super().__init__()
        self.strio = StringIO()

    def emit(self, record):
        msg = self.format(record)
        self.strio.write(msg + '\n')

    def get_value(self):
        return self.strio.getvalue()


def parse_arguments():
    parser = argparse.ArgumentParser(description='Modify hardware parameters of a KVM virtual machine.')
    parser.add_argument('-v', '--vm', required=True, help='Name of the virtual machine to modify.')
    parser.add_argument('-c', '--cpu', type=int, help='Number of virtual CPUs to assign.')
    parser.add_argument('-m', '--memory', type=int, help='Amount of memory to assign in MiB.')
    parser.add_argument('-p', '--pci', action='append', help='PCI hardware ID(s) to passthrough to the VM (e.g., 0000:00:1f.2). Can be specified multiple times.')
    parser.add_argument('-s', '--start', action='store_true', help='Start the VM after modification.')
    args = parser.parse_args()
    return args


def modify_vm(dom, cpu_count, memory_amount, pci_ids, logger):
    try:
        # Get the XML description of the VM
        xml_desc = dom.XMLDesc()
        root = ET.fromstring(xml_desc)

        # Modify CPU count
        if cpu_count is not None:
            vcpu_elem = root.find('./vcpu')
            if vcpu_elem is not None:
                vcpu_elem.text = str(cpu_count)
                logger.info(f"Set CPU count to {cpu_count}.")
            else:
                logger.error("Could not find <vcpu> element in XML.")
                sys.exit(1)

        # Modify memory amount
        if memory_amount is not None:
            memory_elem = root.find('./memory')
            current_memory_elem = root.find('./currentMemory')
            if memory_elem is not None and current_memory_elem is not None:
                memory_elem.text = str(memory_amount * 1024)  # Convert MiB to KiB
                current_memory_elem.text = str(memory_amount * 1024)
                logger.info(f"Set memory to {memory_amount} MiB.")
            else:
                logger.error("Could not find <memory> elements in XML.")
                sys.exit(1)

        # Add PCI device passthrough(s)
        if pci_ids:
            devices_elem = root.find('./devices')
            if devices_elem is not None:
                for pci_id in pci_ids:
                    hostdev_elem = ET.SubElement(devices_elem, 'hostdev', attrib={
                        'mode': 'subsystem',
                        'type': 'pci',
                        'managed': 'yes'
                    })
                    source_elem = ET.SubElement(hostdev_elem, 'source')
                    # Split PCI ID into components (domain:bus:slot.function)
                    parts = pci_id.split(':')
                    if len(parts) != 3:
                        logger.error(f"Invalid PCI ID format: {pci_id}. Expected format: domain:bus:slot.function")
                        sys.exit(1)
                    domain_id = parts[0]
                    bus = parts[1]
                    slot_func = parts[2].split('.')
                    if len(slot_func) != 2:
                        logger.error(f"Invalid PCI ID format: {pci_id}. Expected format: domain:bus:slot.function")
                        sys.exit(1)
                    slot = slot_func[0]
                    function = slot_func[1]
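                    # e.g. '0000:c7:00.0' -> domain='0000', bus='c7', slot='00', function='0'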
                    address_attrs = {
                        'domain': f'0x{domain_id}',
                        'bus': f'0x{bus}',
                        'slot': f'0x{slot}',
                        'function': f'0x{function}'
                    }
                    ET.SubElement(source_elem, 'address', attrib=address_attrs)
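                    # Equivalent hostdev XML (sketch):
                    #   <hostdev mode='subsystem' type='pci' managed='yes'>
                    #     <source>
                    #       <address domain='0x0000' bus='0xc7' slot='0x00' function='0x0'/>
                    #     </source>
                    #   </hostdev>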
                    logger.info(f"Added PCI device passthrough for {pci_id}.")
            else:
                logger.error("Could not find <devices> element in XML.")
                sys.exit(1)

        # Convert XML back to string
        new_xml_desc = ET.tostring(root, encoding='unicode')
        return new_xml_desc
    except Exception as e:
        logger.error(f"Failed to modify VM XML: {e}")
        sys.exit(1)


def redefine_vm(conn, new_xml_desc, logger):
    try:
        conn.defineXML(new_xml_desc)
        logger.info("VM redefined with new hardware parameters.")
    except libvirt.libvirtError as e:
        logger.error(f"Failed to redefine VM: {e}")
        sys.exit(1)


def main():
    # Set up logging using the so_logging_utils library
    string_handler = StringIOHandler()
    string_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
    logger = setup_logging(
        logger_name='so-kvm-modify-hardware',
        log_file_path='/opt/so/log/hypervisor/so-kvm-modify-hardware.log',
        log_level=logging.INFO,
        format_str='%(asctime)s - %(levelname)s - %(message)s'
    )
    logger.addHandler(string_handler)

    try:
        args = parse_arguments()

        vm_name = args.vm
        cpu_count = args.cpu
        memory_amount = args.memory
        pci_ids = args.pci  # This will be a list or None
        start_vm_flag = args.start

        # Connect to libvirt
        try:
            conn = libvirt.open(None)
        except libvirt.libvirtError as e:
            logger.error(f"Failed to open connection to libvirt: {e}")
            try:
                subprocess.run([
                    'so-salt-emit-vm-deployment-status-event',
                    '-v', vm_name,
                    '-H', HYPERVISOR,
                    '-s', 'Hardware Configuration Failed'
                ], check=True)
            except subprocess.CalledProcessError as e:
                logger.error(f"Failed to emit failure status event: {e}")
            sys.exit(1)

        # Stop VM if running
        dom = stop_vm(conn, vm_name, logger)

        # Modify VM XML
        new_xml_desc = modify_vm(dom, cpu_count, memory_amount, pci_ids, logger)

        # Redefine VM
        redefine_vm(conn, new_xml_desc, logger)

        # Start VM if -s or --start argument is provided
        if start_vm_flag:
            dom = conn.lookupByName(vm_name)
            start_vm(dom, logger)
            logger.info(f"VM '{vm_name}' started successfully.")
        else:
            logger.info("VM start flag not provided; VM will remain stopped.")

        # Close connection
        conn.close()

        # Send success status event
        try:
            subprocess.run([
                'so-salt-emit-vm-deployment-status-event',
                '-v', vm_name,
                '-H', HYPERVISOR,
                '-s', 'Hardware Configuration'
            ], check=True)
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to emit success status event: {e}")

    except KeyboardInterrupt:
        error_msg = "Operation cancelled by user"
        logger.error(error_msg)
        try:
            subprocess.run([
                'so-salt-emit-vm-deployment-status-event',
                '-v', vm_name,
                '-H', HYPERVISOR,
                '-s', 'Hardware Configuration Failed'
            ], check=True)
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to emit failure status event: {e}")
        sys.exit(1)
    except Exception as e:
        error_msg = str(e)
        if "Failed to open connection to libvirt" in error_msg:
            error_msg = f"Failed to connect to libvirt: {error_msg}"
        elif "Failed to redefine VM" in error_msg:
            error_msg = f"Failed to apply hardware changes: {error_msg}"
        elif "Failed to modify VM XML" in error_msg:
            error_msg = f"Failed to update hardware configuration: {error_msg}"
        else:
            error_msg = f"An error occurred: {error_msg}"
        logger.error(error_msg)
        try:
            subprocess.run([
                'so-salt-emit-vm-deployment-status-event',
                '-v', vm_name,
                '-H', HYPERVISOR,
                '-s', 'Hardware Configuration Failed'
            ], check=True)
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to emit failure status event: {e}")
        sys.exit(1)


if __name__ == '__main__':
    main()

{%- else -%}

echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
for more information about purchasing a license to enable this feature."

{% endif -%}
531
salt/hypervisor/tools/sbin_jinja/so-qcow2-modify-network
Normal file
@@ -0,0 +1,531 @@
|
||||
#!/usr/bin/python3
|
||||
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
#
|
||||
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||
#
|
||||
# "You may not move, change, disable, or circumvent the license key functionality
|
||||
# in the software, and you may not remove or obscure any functionality in the
|
||||
# software that is protected by the license key."
|
||||
|
||||
{% if 'vrt' in salt['pillar.get']('features', []) -%}
|
||||
|
||||
"""
|
||||
Script for modifying network configurations within QCOW2 virtual machine images. This script provides
|
||||
functionality to update NetworkManager settings, supporting both DHCP and static IP configurations
|
||||
without requiring the VM to be running.
|
||||
|
||||
The script offers two main configuration modes:
|
||||
1. DHCP Configuration: Enable automatic IP address assignment
|
||||
2. Static IP Configuration: Set specific IP address, gateway, DNS servers, and search domains
|
||||
|
||||
For both configuration modes, the script automatically sets the following NetworkManager connection properties:
|
||||
- connection.autoconnect: yes (ensures interface connects automatically)
|
||||
- connection.autoconnect-priority: 999 (sets connection priority)
|
||||
- connection.autoconnect-retries: -1 (unlimited connection retries)
- connection.multi-connect: 0 (single connection mode)
- connection.wait-device-timeout: -1 (wait indefinitely for device)

This script is designed to work with Security Onion's virtualization infrastructure and is
typically used during VM provisioning and network reconfiguration tasks.

**Usage:**
so-qcow2-modify-network -I <qcow2_image_path> -i <interface> (--dhcp4 | --static4 --ip4 <ip_address> --gw4 <gateway>)
                        [--dns4 <dns_servers>] [--search4 <search_domain>]

**Options:**
-I, --image      Path to the QCOW2 image.
-i, --interface  Network interface to modify (e.g., enp1s0).
--dhcp4          Configure interface for DHCP (IPv4).
--static4        Configure interface for static IPv4 settings.
--ip4            IPv4 address (e.g., 192.168.1.10/24). Required for static IPv4 configuration.
--gw4            IPv4 gateway (e.g., 192.168.1.1). Required for static IPv4 configuration.
--dns4           Comma-separated list of IPv4 DNS servers (e.g., 8.8.8.8,8.8.4.4).
--search4        DNS search domain for IPv4.

**Examples:**

1. **Static IP Configuration with DNS and Search Domain:**

   ```bash
   so-qcow2-modify-network -I /nsm/libvirt/images/sool9/sool9.qcow2 -i enp1s0 --static4 \
     --ip4 192.168.1.10/24 --gw4 192.168.1.1 --dns4 192.168.1.1,192.168.1.2 --search4 example.local
   ```

   This command configures the network settings in the QCOW2 image with:
   - Static IPv4 configuration:
     - IP Address: `192.168.1.10/24`
     - Gateway: `192.168.1.1`
     - DNS Servers: `192.168.1.1`, `192.168.1.2`
     - DNS Search Domain: `example.local`

2. **DHCP Configuration:**

   ```bash
   so-qcow2-modify-network -I /nsm/libvirt/images/sool9/sool9.qcow2 -i enp1s0 --dhcp4
   ```

   This command configures the network interface to use DHCP for automatic IP address assignment.

3. **Static IP Configuration without DNS Settings:**

   ```bash
   so-qcow2-modify-network -I /nsm/libvirt/images/sool9/sool9.qcow2 -i enp1s0 --static4 \
     --ip4 192.168.1.20/24 --gw4 192.168.1.1
   ```

   This command sets only the basic static IP configuration:
   - IP Address: `192.168.1.20/24`
   - Gateway: `192.168.1.1`

**Notes:**

- When using `--static4`, both `--ip4` and `--gw4` options are required.
- The script validates IP addresses, DNS servers, and interface names before making any changes.
- DNS servers can be specified as a comma-separated list for multiple servers.
- The script requires write permissions for the QCOW2 image file.
- Interface names must contain only alphanumeric characters, underscores, and hyphens.

**Description:**

The `so-qcow2-modify-network` script modifies network configuration within a QCOW2 image
using the following process (a minimal sketch follows the list):

1. **Image Access:**
   - Mounts the QCOW2 image using libguestfs
   - Locates and accesses the NetworkManager configuration directory

2. **Configuration Update:**
   - Reads the existing network configuration for the specified interface
   - Updates IPv4 settings based on provided parameters
   - Supports both DHCP and static IP configurations
   - Validates all input parameters before making changes

3. **File Management:**
   - Creates or updates the NetworkManager connection file
   - Maintains proper file permissions and format
   - Safely unmounts the image after changes
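
As a minimal sketch of this mount-edit-unmount cycle (illustrative only; the
path and the edit step are placeholders, but these are the same libguestfs
calls the script uses):

```python
import guestfs

g = guestfs.GuestFS(python_return_dict=True)
g.add_drive_opts("/path/to/image.qcow2", format="qcow2")
g.launch()
g.mount(g.inspect_os()[0], "/")            # mount the guest root filesystem
path = "/etc/NetworkManager/system-connections/enp1s0.nmconnection"
data = g.read_file(path).decode("utf-8")   # read the current keyfile
# ... modify `data` here ...
g.write(path, data.encode("utf-8"))        # write the updated keyfile back
g.chmod(0o600, path)                       # NetworkManager requires mode 600
g.umount_all()
g.close()
```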

**Exit Codes:**

- `0`: Success
- Non-zero: An error occurred during execution

**Logging:**

- Logs are written to `/opt/so/log/hypervisor/so-qcow2-modify-network.log`
- Both file and console logging are enabled for real-time monitoring
- Log entries include:
  - Timestamps in ISO 8601 format
  - Severity levels (INFO, WARNING, ERROR)
  - Detailed error messages for troubleshooting
- Critical operations logged:
  - Network configuration changes
  - Image mount/unmount operations
  - Validation failures
  - File access errors
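
For example, a typical INFO entry produced by this format looks like
(illustrative timestamp):

    2025-06-01 12:00:00,000 - INFO - Network configuration update completed successfully
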
"""
|
||||
|
||||
import argparse
|
||||
import guestfs
|
||||
import re
|
||||
import sys
|
||||
import logging
|
||||
import os
|
||||
import socket
|
||||
import ipaddress
|
||||
import configparser
|
||||
import uuid
|
||||
from io import StringIO
|
||||
import libvirt
|
||||
from so_logging_utils import setup_logging
|
||||
import subprocess
|
||||
|
||||
# Get hypervisor name from local hostname
|
||||
HYPERVISOR = socket.gethostname()
|
||||
|
||||
# Custom log handler to capture output
|
||||
class StringIOHandler(logging.Handler):
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
self.strio = StringIO()
|
||||
|
||||
def emit(self, record):
|
||||
msg = self.format(record)
|
||||
self.strio.write(msg + '\n')
|
||||
|
||||
def get_value(self):
|
||||
return self.strio.getvalue()
|
||||
|
||||
# Set up logging using the so_logging_utils library
|
||||
string_handler = StringIOHandler()
|
||||
string_handler.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))
|
||||
logger = setup_logging(
|
||||
logger_name='so-qcow2-modify-network',
|
||||
log_file_path='/opt/so/log/hypervisor/so-qcow2-modify-network.log',
|
||||
log_level=logging.INFO,
|
||||
format_str='%(asctime)s - %(levelname)s - %(message)s'
|
||||
)
|
||||
logger.addHandler(string_handler)
|
||||
|
||||
NETWORK_CONFIG_DIR = "/etc/NetworkManager/system-connections"
|
||||
|
||||
def validate_ip_address(ip_str, description="IP address"):
|
||||
try:
|
||||
ipaddress.IPv4Interface(ip_str)
|
||||
except ValueError:
|
||||
try:
|
||||
ipaddress.IPv4Address(ip_str)
|
||||
except ValueError:
|
||||
raise ValueError(f"Invalid {description}: {ip_str}")
|
||||
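
# Note: the validator above accepts either CIDR interface notation
# (e.g. "192.168.1.10/24", as --ip4 expects) or a bare IPv4 address
# (e.g. "192.168.1.1", as --gw4 and --dns4 expect).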

def validate_dns_addresses(dns_str):
    dns_list = dns_str.split(',')
    for dns in dns_list:
        dns = dns.strip()
        validate_ip_address(dns, description="DNS server address")

def validate_interface_name(interface_name):
    if not re.match(r'^[a-zA-Z0-9_\-]+$', interface_name):
        raise ValueError(f"Invalid interface name: {interface_name}")

def check_base_domain_status(image_path):
    """
    Check if the base domain corresponding to the image path is currently running.
    Base domains should not be running when modifying their configuration.

    Parameters:
        image_path (str): Path to the QCOW2 image.

    Returns:
        bool: True if the base domain is running, False otherwise.
    """
    base_domain = os.path.basename(os.path.dirname(image_path))
    logger.info(f"Verifying base domain status for image: {image_path}")
    logger.info(f"Checking if base domain '{base_domain}' is running...")

    try:
        conn = libvirt.open('qemu:///system')
        try:
            dom = conn.lookupByName(base_domain)
            is_running = dom.isActive()
            if is_running:
                logger.error(f"Base domain '{base_domain}' is running - cannot modify configuration")
            return is_running
        except libvirt.libvirtError:
            logger.info(f"Base domain '{base_domain}' not found or not running")
            return False
        finally:
            conn.close()
    except libvirt.libvirtError as e:
        logger.error(f"Failed to connect to libvirt: {e}")
        return False
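
# Illustrative example: for /nsm/libvirt/images/sool9/sool9.qcow2 the function
# above derives the base domain name "sool9" from the image's parent directory
# and then asks libvirt for a domain by that name.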

def update_network_config(content, mode, ip=None, gateway=None, dns=None, search_domain=None):
    config = configparser.ConfigParser(strict=False)
    config.optionxform = str
    config.read_string(content)

    # Ensure connection section exists and set required properties
    if 'connection' not in config.sections():
        logger.info("Creating new connection section in network configuration")
        config.add_section('connection')

    # Set mandatory connection properties
    config.set('connection', 'autoconnect', 'yes')
    config.set('connection', 'autoconnect-priority', '999')
    config.set('connection', 'autoconnect-retries', '-1')
    config.set('connection', 'multi-connect', '0')
    config.set('connection', 'wait-device-timeout', '-1')

    # Ensure ipv4 section exists
    if 'ipv4' not in config.sections():
        logger.info("Creating new IPv4 section in network configuration")
        config.add_section('ipv4')

    if mode == "dhcp4":
        logger.info("Configuring DHCP settings:")
        logger.info("  method: auto (DHCP enabled)")
        logger.info("  Removing any existing static configuration")
        config.set('ipv4', 'method', 'auto')
        config.remove_option('ipv4', 'address1')
        config.remove_option('ipv4', 'addresses')
        config.remove_option('ipv4', 'dns')
        config.remove_option('ipv4', 'dns-search')
    elif mode == "static4":
        logger.info("Configuring static IP settings:")
        logger.info("  method: manual (static configuration)")
        config.set('ipv4', 'method', 'manual')
        if ip and gateway:
            logger.info(f"  Setting address: {ip}")
            logger.info(f"  Setting gateway: {gateway}")
            config.set('ipv4', 'address1', f"{ip},{gateway}")
        else:
            logger.error("Missing required IP address or gateway for static configuration")
            raise ValueError("Both IP address and gateway are required for static configuration.")
        if dns:
            logger.info(f"  Setting DNS servers: {dns}")
            config.set('ipv4', 'dns', f"{dns};")
        else:
            logger.info("  No DNS servers specified")
            config.remove_option('ipv4', 'dns')
        if search_domain:
            logger.info(f"  Setting search domain: {search_domain}")
            config.set('ipv4', 'dns-search', f"{search_domain};")
        else:
            logger.info("  No search domain specified")
            config.remove_option('ipv4', 'dns-search')
    else:
        raise ValueError(f"Invalid mode '{mode}'. Expected 'dhcp4' or 'static4'.")

    output = StringIO()
    config.write(output, space_around_delimiters=False)
    updated_content = output.getvalue()
    output.close()

    return updated_content
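
# Illustrative example: for mode="static4", ip="192.168.1.10/24",
# gateway="192.168.1.1", dns="192.168.1.1", the function above produces
# an [ipv4] section like:
#
#   [ipv4]
#   method=manual
#   address1=192.168.1.10/24,192.168.1.1
#   dns=192.168.1.1;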

def modify_network_config(image_path, interface, mode, ip=None, gateway=None, dns=None, search_domain=None):
    """
    Modifies network configuration in a QCOW2 image, ensuring specific connection settings are set.

    Handles both eth0 and predictable network interface names (e.g., enp1s0).
    If the requested interface configuration is not found but eth0.nmconnection exists,
    it will be renamed and updated with the proper interface configuration.
    """
    # Check if base domain is running
    if check_base_domain_status(image_path):
        raise RuntimeError("Cannot modify network configuration while base domain is running")

    if not os.access(image_path, os.W_OK):
        logger.error(f"Permission denied: Cannot write to image file {image_path}")
        raise PermissionError(f"Write permission denied for image file: {image_path}")

    logger.info(f"Configuring network for VM image: {image_path}")
    logger.info(f"Network configuration details for interface {interface}:")
    logger.info(f"  Mode: {mode.upper()}")
    if mode == "static4":
        logger.info(f"  IP Address: {ip}")
        logger.info(f"  Gateway: {gateway}")
        logger.info(f"  DNS Servers: {dns if dns else 'Not configured'}")
        logger.info(f"  Search Domain: {search_domain if search_domain else 'Not configured'}")

    g = guestfs.GuestFS(python_return_dict=True)
    try:
        logger.info("Initializing GuestFS and mounting image...")
        g.set_network(False)
        g.selinux = False
        g.add_drive_opts(image_path, format="qcow2")
        g.launch()
    except RuntimeError as e:
        logger.error(f"Failed to initialize GuestFS: {e}")
        raise RuntimeError(f"Failed to initialize GuestFS or launch appliance: {e}")

    try:
        os_list = g.inspect_os()
        if not os_list:
            logger.error(f"No operating system found in image: {image_path}")
            raise RuntimeError(f"Unable to find any OS in {image_path}.")

        root_fs = os_list[0]
        try:
            g.mount(root_fs, "/")
            logger.info("Successfully mounted VM image filesystem")
        except RuntimeError as e:
            logger.error(f"Failed to mount filesystem: {e}")
            raise RuntimeError(f"Failed to mount the filesystem: {e}")

        if not g.is_dir(NETWORK_CONFIG_DIR):
            logger.error(f"NetworkManager configuration directory not found: {NETWORK_CONFIG_DIR}")
            raise FileNotFoundError(f"NetworkManager configuration directory not found in the image at {NETWORK_CONFIG_DIR}.")

        requested_config_path = f"{NETWORK_CONFIG_DIR}/{interface}.nmconnection"
        eth0_config_path = f"{NETWORK_CONFIG_DIR}/eth0.nmconnection"
        config_file_path = None
        current_content = None

        # Try to read the requested interface config first
        try:
            file_content = g.read_file(requested_config_path)
            current_content = file_content.decode('utf-8')
            config_file_path = requested_config_path
            logger.info(f"Found existing network configuration for interface {interface}")
        except RuntimeError:
            # If not found, try eth0 config
            try:
                file_content = g.read_file(eth0_config_path)
                current_content = file_content.decode('utf-8')
                config_file_path = eth0_config_path
                logger.info("Found eth0 network configuration, will update for new interface")
            except RuntimeError:
                logger.error(f"No network configuration found for either {interface} or eth0")
                raise FileNotFoundError(f"No network configuration found at {requested_config_path} or {eth0_config_path}")
        except UnicodeDecodeError:
            logger.error("Failed to decode network configuration file")
            raise ValueError("Failed to decode the configuration file")

        # If using eth0 config, update interface-specific fields
        if config_file_path == eth0_config_path:
            config = configparser.ConfigParser(strict=False)
            config.optionxform = str
            config.read_string(current_content)

            if 'connection' not in config.sections():
                config.add_section('connection')

            # Update interface-specific fields
            config.set('connection', 'id', interface)
            config.set('connection', 'interface-name', interface)
            config.set('connection', 'uuid', str(uuid.uuid4()))

            # Write updated content back to string
            output = StringIO()
            config.write(output, space_around_delimiters=False)
            current_content = output.getvalue()
            output.close()

            # Update config file path to new interface name
            config_file_path = requested_config_path

        logger.info("Applying network configuration changes...")
        updated_content = update_network_config(current_content, mode, ip, gateway, dns, search_domain)

        try:
            g.write(config_file_path, updated_content.encode('utf-8'))
            # Set proper permissions (600) on the network configuration file
            g.chmod(0o600, config_file_path)
            logger.info("Successfully wrote updated network configuration with proper permissions (600)")

            # If we renamed eth0 to the new interface, remove the old eth0 config
            if config_file_path == requested_config_path and eth0_config_path != requested_config_path:
                try:
                    g.rm(eth0_config_path)
                    logger.info("Removed old eth0 configuration file")
                except RuntimeError:
                    logger.warning("Could not remove old eth0 configuration file - it may have already been removed")

        except RuntimeError as e:
            logger.error(f"Failed to write network configuration: {e}")
            raise IOError(f"Failed to write updated configuration to {config_file_path}: {e}")

        logger.info("Successfully updated network configuration:")
        logger.info(f"  Image: {image_path}")
        logger.info(f"  Interface: {interface}")
        logger.info(f"  Mode: {mode.upper()}")
        if mode == "static4":
            logger.info("  Settings applied:")
            logger.info(f"    IP Address: {ip}")
            logger.info(f"    Gateway: {gateway}")
            logger.info(f"    DNS Servers: {dns if dns else 'Not configured'}")
            logger.info(f"    Search Domain: {search_domain if search_domain else 'Not configured'}")

    finally:
        g.umount_all()
        g.close()

def parse_arguments():
    parser = argparse.ArgumentParser(description="Modify IPv4 settings in a QCOW2 image for a specified network interface.")
    parser.add_argument("-I", "--image", required=True, help="Path to the QCOW2 image.")
    parser.add_argument("-i", "--interface", required=True, help="Network interface to modify (e.g., enp1s0).")
    parser.add_argument("-n", "--vm-name", required=True, help="Full name of the VM (hostname_role).")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--dhcp4", action="store_true", help="Configure interface for DHCP (IPv4).")
    group.add_argument("--static4", action="store_true", help="Configure interface for static IPv4 settings.")
    parser.add_argument("--ip4", help="IPv4 address (e.g., 192.168.1.10/24). Required for static IPv4 configuration.")
    parser.add_argument("--gw4", help="IPv4 gateway (e.g., 192.168.1.1). Required for static IPv4 configuration.")
    parser.add_argument("--dns4", help="Comma-separated list of IPv4 DNS servers (e.g., 8.8.8.8,8.8.4.4).")
    parser.add_argument("--search4", help="DNS search domain for IPv4.")

    args = parser.parse_args()

    if args.static4:
        if not args.ip4 or not args.gw4:
            parser.error("Both --ip4 and --gw4 are required for static IPv4 configuration.")
    return args

def main():
    try:
        logger.info("Starting network configuration update...")
        args = parse_arguments()

        logger.info("Validating interface name...")
        validate_interface_name(args.interface)

        if args.dhcp4:
            mode = "dhcp4"
            logger.info("Using DHCP configuration mode")
        elif args.static4:
            mode = "static4"
            logger.info("Using static IP configuration mode")
            if not args.ip4 or not args.gw4:
                logger.error("Missing required parameters for static configuration")
                raise ValueError("Both --ip4 and --gw4 are required for static IPv4 configuration.")

            logger.info("Validating IP addresses...")
            validate_ip_address(args.ip4, description="IPv4 address")
            validate_ip_address(args.gw4, description="IPv4 gateway")
            if args.dns4:
                validate_dns_addresses(args.dns4)
        else:
            logger.error("No configuration mode specified")
            raise ValueError("Either --dhcp4 or --static4 must be specified.")

        modify_network_config(args.image, args.interface, mode, args.ip4, args.gw4, args.dns4, args.search4)
        logger.info("Network configuration update completed successfully")

        # Send success status event
        try:
            subprocess.run([
                'so-salt-emit-vm-deployment-status-event',
                '-v', args.vm_name,
                '-H', HYPERVISOR,
                '-s', 'IP Configuration'
            ], check=True)
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to emit success status event: {e}")

    except KeyboardInterrupt:
        error_msg = "Operation cancelled by user"
        logger.error(error_msg)
        try:
            subprocess.run([
                'so-salt-emit-vm-deployment-status-event',
                '-v', args.vm_name,
                '-H', HYPERVISOR,
                '-s', 'IP Configuration Failed'
            ], check=True)
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to emit failure status event: {e}")
        sys.exit(1)
    except Exception as e:
        error_msg = str(e)
        if "base domain is running" in error_msg:
            logger.error("Cannot proceed: Base domain must not be running when modifying network configuration")
            error_msg = "Base domain must not be running when modifying network configuration"
        else:
            logger.error(f"An error occurred: {e}")
        try:
            subprocess.run([
                'so-salt-emit-vm-deployment-status-event',
                '-v', args.vm_name,
                '-H', HYPERVISOR,
                '-s', 'IP Configuration Failed'
            ], check=True)
        except subprocess.CalledProcessError as e:
            logger.error(f"Failed to emit failure status event: {e}")
        sys.exit(1)

if __name__ == '__main__':
    main()

{%- else -%}

echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
for more information about purchasing a license to enable this feature."

{% endif -%}

@@ -86,7 +86,7 @@ idh_sbin:
  file.recurse:
    - name: /usr/sbin
    - source: salt://idh/tools/sbin
-    - user: 934
+    - user: 939
    - group: 939
    - file_mode: 755

@@ -20,7 +20,7 @@ idstools_sbin:
  file.recurse:
    - name: /usr/sbin
    - source: salt://idstools/tools/sbin
-    - user: 934
+    - user: 939
    - group: 939
    - file_mode: 755

@@ -29,7 +29,7 @@ idstools_sbin:
#  file.recurse:
#    - name: /usr/sbin
#    - source: salt://idstools/tools/sbin_jinja
-#    - user: 934
+#    - user: 939
#    - group: 939
#    - file_mode: 755
#    - template: jinja
@@ -38,7 +38,7 @@ idstools_so-rule-update:
  file.managed:
    - name: /usr/sbin/so-rule-update
    - source: salt://idstools/tools/sbin_jinja/so-rule-update
-    - user: 934
+    - user: 939
    - group: 939
    - mode: 755
    - template: jinja

@@ -22,7 +22,7 @@ kibana:
      - default
      - file
  migrations:
-    discardCorruptObjects: "8.17.3"
+    discardCorruptObjects: "8.18.8"
  telemetry:
    enabled: False
  security:

@@ -1,3 +1,3 @@
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS -%}

-{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "{{ ELASTICSEARCHDEFAULTS.elasticsearch.version }}","id": "{{ ELASTICSEARCHDEFAULTS.elasticsearch.version }}","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
+{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"savedObjects:listingLimit":1500,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "{{ ELASTICSEARCHDEFAULTS.elasticsearch.version }}","id": "{{ ELASTICSEARCHDEFAULTS.elasticsearch.version }}","references": [],"type": "config","version": "WzI5NzUsMl0="}

@@ -13,6 +13,6 @@ echo "Setting up default Space:"
{% if HIGHLANDER %}
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["enterpriseSearch"]} ' >> /opt/so/log/kibana/misc.log
{% else %}
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV2","inventory","dataQuality","actions"]} ' >> /opt/so/log/kibana/misc.log
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","entityManager"]} ' >> /opt/so/log/kibana/misc.log
{% endif %}
echo

@@ -54,6 +54,9 @@ so-kratos:
    - file: kratosconfig
    - file: kratoslogdir
    - file: kratosdir
+    - retry:
+        attempts: 10
+        interval: 10

delete_so-kratos_so-status.disabled:
  file.uncomment:

18
salt/libvirt/64962/init.sls
Normal file
@@ -0,0 +1,18 @@
python3_lief:
  pkg.installed:
    - name: securityonion-python3-lief

so-fix-salt-ldap_script:
  file.managed:
    - name: /usr/sbin/so-fix-salt-ldap.py
    - source: salt://libvirt/64962/scripts/so-fix-salt-ldap.py
    - mode: 744

fix-salt-ldap:
  cmd.run:
    - name: /usr/sbin/so-fix-salt-ldap.py
    - require:
      - pkg: python3_lief
      - file: so-fix-salt-ldap_script
    - onchanges:
      - file: so-fix-salt-ldap_script
79
salt/libvirt/64962/scripts/so-fix-salt-ldap.py
Normal file
@@ -0,0 +1,79 @@
#!/usr/bin/env python3

# this script comes from the user nf-brentsaner located here https://github.com/saltstack/salt/issues/64962
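#
# In short, the script below checks whether the system libldap imports the
# EVP_md2 symbol; if so, it stops any running salt-* services, backs up the
# copy in Salt's bundled lib directory, writes a copy with that symbol
# stripped, clears Python bytecode caches, and restarts the services.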

import datetime
import grp
import os
import pathlib
import pwd
import shutil
##
import dbus  # dnf -y install python3-dbus
##
import lief  # https://pypi.org/project/lief/

salt_root = pathlib.Path('/opt/saltstack')
src_lib = pathlib.Path('/lib64/libldap.so.2')
dst_lib = salt_root.joinpath('salt', 'lib', 'libldap.so.2')

uname = 'salt'
gname = 'salt'

lib = lief.parse(str(src_lib))

sym = next((i for i in lib.imported_symbols if i.name == 'EVP_md2'), None)

if sym:
    # Get the Salt services from DBus.
    sysbus = dbus.SystemBus()
    sysd = sysbus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
    mgr = dbus.Interface(sysd, 'org.freedesktop.systemd1.Manager')
    svcs = []
    for i in mgr.ListUnits():
        # first element is unit name.
        if not str(i[0]).startswith('salt-'):
            continue
        svc = sysbus.get_object('org.freedesktop.systemd1', object_path=mgr.GetUnit(str(i[0])))
        props = dbus.Interface(svc, dbus_interface='org.freedesktop.DBus.Properties')
        state = props.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if str(state) == 'active':
            svcs.append(i[0])
    # Get the user/group
    u = pwd.getpwnam(uname)
    g = grp.getgrnam(gname)
    # Modify
    print('Modifications necessary.')
    if svcs:
        # Stop the services first.
        for sn in svcs:
            mgr.StopUnit(sn, 'replace')
    if dst_lib.exists():
        # datetime.utcnow() is deprecated on newer Pythons, but the
        # datetime.UTC form below requires Python 3.11+.
        #dst_lib_bak = pathlib.Path(str(dst_lib) + '.bak_{0}'.format(datetime.datetime.now(datetime.UTC).timestamp()))
        dst_lib_bak = pathlib.Path(str(dst_lib) + '.bak_{0}'.format(datetime.datetime.utcnow().timestamp()))
        os.rename(dst_lib, dst_lib_bak)
        print('Destination file {0} exists; backed up to {1}.'.format(dst_lib, dst_lib_bak))
    lib.remove_dynamic_symbol(sym)
    lib.write(str(dst_lib))
    os.chown(dst_lib, u.pw_uid, g.gr_gid)
    os.chmod(dst_lib, src_lib.stat().st_mode)
    # Before we restart services, we also want to remove any python caches.
    for root, dirs, files in os.walk(salt_root):
        for f in files:
            if f.lower().endswith('.pyc'):
                fpath = os.path.join(root, f)
                os.remove(fpath)
                print('Removed file {0}'.format(fpath))
        if '__pycache__' in dirs:
            dpath = os.path.join(root, '__pycache__')
            shutil.rmtree(dpath)
            print('Removed directory {0}'.format(dpath))
    # And then start the units that were started before.
    if svcs:
        for sn in svcs:
            mgr.RestartUnit(sn, 'replace')
else:
    print('No EVP_md2 symbol found in the library. No modifications needed.')

print('Done.')
44
salt/libvirt/bridge.sls
Normal file
@@ -0,0 +1,44 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# We do not import GLOBALS in this state because it is called during setup
include:
  - salt.minion.service_file
  - salt.mine_functions

down_original_mgmt_interface:
  cmd.run:
    - name: "nmcli con down {{ pillar.host.mainint }}"
    - unless:
      - nmcli -f GENERAL.CONNECTION dev show {{ pillar.host.mainint }} | grep bridge-slave-{{ pillar.host.mainint }}
    - order: last

wait_for_br0_ip:
  cmd.run:
    - name: |
        counter=0
        until ip addr show br0 | grep -q "inet "; do
          sleep 1
          counter=$((counter+1))
          if [ $counter -ge 90 ]; then
            echo "Timeout waiting for br0 to get an IP address"
            exit 1
          fi
        done
        echo "br0 has IP address: $(ip addr show br0 | grep 'inet ' | awk '{print $2}')"
    - timeout: 95
    - onchanges:
      - cmd: down_original_mgmt_interface
    - onchanges_in:
      - file: salt_minion_service_unit_file
      - file: mine_functions
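# The 95s timeout above sits just past the polling loop's own 90-second limit,
# so the script can print its own timeout message before Salt kills the command.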

restart_salt_minion_service:
  service.running:
    - name: salt-minion
    - enable: True
    - listen:
      - file: salt_minion_service_unit_file
      - file: mine_functions
53
salt/libvirt/defaults.yaml
Normal file
@@ -0,0 +1,53 @@
libvirt:
  config:
    listen_tls: 1
    listen_tcp: 0
    tls_port: "16514"
    tcp_port: "16509"
    listen_addr: "0.0.0.0"
    unix_sock_group: "root"
    unix_sock_ro_perms: "0777"
    unix_sock_rw_perms: "0770"
    unix_sock_admin_perms: "0700"
    unix_sock_dir: "/run/libvirt"
    auth_unix_ro: "polkit"
    auth_unix_rw: "polkit"
    auth_tcp: "sasl"
    auth_tls: "none"
    tcp_min_ssf: 112
    access_drivers: ["polkit"]
    key_file: "/etc/pki/libvirt/private/serverkey.pem"
    cert_file: "/etc/pki/libvirt/servercert.pem"
    ca_file: "/etc/pki/CA/cacert.pem"
    #crl_file: "/etc/pki/CA/crl.pem"
    tls_no_sanity_certificate: 0
    tls_no_verify_certificate: 0
    tls_allowed_dn_list: ["DN1", "DN2"]
    tls_priority: "NORMAL"
    sasl_allowed_username_list: ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM"]
    max_clients: 5000
    max_queued_clients: 1000
    max_anonymous_clients: 20
    min_workers: 5
    max_workers: 20
    prio_workers: 5
    max_client_requests: 5
    admin_min_workers: 1
    admin_max_workers: 5
    admin_max_clients: 5
    admin_max_queued_clients: 5
    admin_max_client_requests: 5
    log_level: 3
    log_filters: "1:qemu 1:libvirt 4:object 4:json 4:event 1:util"
    log_outputs: "3:syslog:libvirtd"
    audit_level: 2
    audit_logging: 1
    #host_uuid: "00000000-0000-0000-0000-000000000000"
    host_uuid_source: "smbios"
    keepalive_interval: 5
    keepalive_count: 5
    keepalive_required: 1
    admin_keepalive_required: 1
    admin_keepalive_interval: 5
    admin_keepalive_count: 5
    ovs_timeout: 5
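# Each key above is rendered as a single "key = value" line in libvirtd.conf
# by salt/libvirt/etc/libvirtd.conf.jinja (shown later in this diff).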
536
salt/libvirt/etc/libvirtd.conf
Normal file
@@ -0,0 +1,536 @@
# Master libvirt daemon configuration file
#

#################################################################
#
# Network connectivity controls
#

# Flag listening for secure TLS connections on the public TCP/IP port.
#
# To enable listening sockets with the 'libvirtd' daemon it's also required to
# pass the '--listen' flag on the commandline of the daemon.
# This is not needed with 'virtproxyd'.
#
# This setting is not required or honoured if using systemd socket
# activation.
#
# It is necessary to setup a CA and issue server certificates before
# using this capability.
#
# This is enabled by default, uncomment this to disable it
#listen_tls = 0

# Listen for unencrypted TCP connections on the public TCP/IP port.
#
# To enable listening sockets with the 'libvirtd' daemon it's also required to
# pass the '--listen' flag on the commandline of the daemon.
# This is not needed with 'virtproxyd'.
#
# This setting is not required or honoured if using systemd socket
# activation.
#
# Using the TCP socket requires SASL authentication by default. Only
# SASL mechanisms which support data encryption are allowed. This is
# DIGEST_MD5 and GSSAPI (Kerberos5)
#
# This is disabled by default, uncomment this to enable it.
#listen_tcp = 1


# Override the port for accepting secure TLS connections
# This can be a port number, or service name
#
# This setting is not required or honoured if using systemd socket
# activation.
#
#tls_port = "16514"

# Override the port for accepting insecure TCP connections
# This can be a port number, or service name
#
# This setting is not required or honoured if using systemd socket
# activation.
#
#tcp_port = "16509"


# Override the default configuration which binds to all network
# interfaces. This can be a numeric IPv4/6 address, or hostname
#
# This setting is not required or honoured if using systemd socket
# activation.
#
# If the libvirtd service is started in parallel with network
# startup (e.g. with systemd), binding to addresses other than
# the wildcards (0.0.0.0/::) might not be available yet.
#
#listen_addr = "192.168.0.1"


#################################################################
#
# UNIX socket access controls
#

# Set the UNIX domain socket group ownership. This can be used to
# allow a 'trusted' set of users access to management capabilities
# without becoming root.
#
# This setting is not required or honoured if using systemd socket
# activation.
#
# This is restricted to 'root' by default.
#unix_sock_group = "libvirt"

# Set the UNIX socket permissions for the R/O socket. This is used
# for monitoring VM status only
#
# This setting is not required or honoured if using systemd socket
# activation.
#
# Default allows any user. If setting group ownership, you may want to
# restrict this too.
#unix_sock_ro_perms = "0777"

# Set the UNIX socket permissions for the R/W socket. This is used
# for full management of VMs
#
# This setting is not required or honoured if using systemd socket
# activation.
#
# Default allows only root. If PolicyKit is enabled on the socket,
# the default will change to allow everyone (eg, 0777)
#
# If not using PolicyKit and setting group ownership for access
# control, then you may want to relax this too.
#unix_sock_rw_perms = "0770"

# Set the UNIX socket permissions for the admin interface socket.
#
# This setting is not required or honoured if using systemd socket
# activation.
#
# Default allows only owner (root), do not change it unless you are
# sure to whom you are exposing the access to.
#unix_sock_admin_perms = "0700"

# Set the name of the directory in which sockets will be found/created.
#
# This setting is not required or honoured if using systemd socket
# activation.
#
#unix_sock_dir = "/run/libvirt"


#################################################################
#
# Authentication.
#
# There are the following choices available:
#
#  - none: do not perform auth checks. If you can connect to the
#          socket you are allowed. This is suitable if there are
#          restrictions on connecting to the socket (eg, UNIX
#          socket permissions), or if there is a lower layer in
#          the network providing auth (eg, TLS/x509 certificates)
#
#  - sasl: use SASL infrastructure. The actual auth scheme is then
#          controlled from /etc/sasl2/libvirt.conf. For the TCP
#          socket only GSSAPI & DIGEST-MD5 mechanisms will be used.
#          For non-TCP or TLS sockets, any scheme is allowed.
#
#  - polkit: use PolicyKit to authenticate. This is only suitable
#            for use on the UNIX sockets. The default policy will
#            require a user to supply their own password to gain
#            full read/write access (aka sudo like), while anyone
#            is allowed read/only access.
#

# Set an authentication scheme for UNIX read-only sockets
#
# By default socket permissions allow anyone to connect
#
# If libvirt was compiled without support for 'polkit', then
# no access control checks are done, but libvirt still only
# allows execution of APIs which don't change state.
#
# If libvirt was compiled with support for 'polkit', then
# the libvirt socket will perform a check with polkit after
# connections. The default policy still allows any local
# user access.
#
# To restrict monitoring of domains you may wish to either
# enable 'sasl' here, or change the polkit policy definition.
#auth_unix_ro = "polkit"

# Set an authentication scheme for UNIX read-write sockets.
#
# If libvirt was compiled without support for 'polkit', then
# the systemd .socket files will use SocketMode=0600 by default
# thus only allowing root user to connect, and 'auth_unix_rw'
# will default to 'none'.
#
# If libvirt was compiled with support for 'polkit', then
# the systemd .socket files will use SocketMode=0666 which
# allows any user to connect and 'auth_unix_rw' will default
# to 'polkit'. If you disable use of 'polkit' here, then it
# is essential to change the systemd SocketMode parameter
# back to 0600, to avoid an insecure configuration.
#
#auth_unix_rw = "polkit"

# Change the authentication scheme for TCP sockets.
#
# If you don't enable SASL, then all TCP traffic is cleartext.
# Don't do this outside of a dev/test scenario. For real world
# use, always enable SASL and use the GSSAPI or DIGEST-MD5
# mechanism in /etc/sasl2/libvirt.conf
#auth_tcp = "sasl"

# Change the authentication scheme for TLS sockets.
#
# TLS sockets already have encryption provided by the TLS
# layer, and limited authentication is done by certificates
#
# It is possible to make use of any SASL authentication
# mechanism as well, by using 'sasl' for this option
#auth_tls = "none"

# Enforce a minimum SSF value for TCP sockets
#
# The default minimum is currently 56 (single-DES) which will
# be raised to 112 in the future.
#
# This option can be used to set values higher than 112
#tcp_min_ssf = 112


# Change the API access control scheme
#
# By default an authenticated user is allowed access
# to all APIs. Access drivers can place restrictions
# on this. By default the 'nop' driver is enabled,
# meaning no access control checks are done once a
# client has authenticated with libvirtd
#
#access_drivers = [ "polkit" ]

#################################################################
#
# TLS x509 certificate configuration
#

# Use of TLS requires that x509 certificates be issued. The default locations
# for the certificate files is as follows:
#
# /etc/pki/CA/cacert.pem - The CA master certificate
# /etc/pki/libvirt/servercert.pem - The server certificate signed by cacert.pem
# /etc/pki/libvirt/private/serverkey.pem - The server private key
#
# It is possible to override the default locations by altering the 'key_file',
# 'cert_file', and 'ca_file' values and uncommenting them below.
#
# NB, overriding the default of one location requires uncommenting and
# possibly additionally overriding the other settings.
#

# Override the default server key file path
#
#key_file = "/etc/pki/libvirt/private/serverkey.pem"

# Override the default server certificate file path
#
#cert_file = "/etc/pki/libvirt/servercert.pem"

# Override the default CA certificate path
#
#ca_file = "/etc/pki/CA/cacert.pem"

# Specify a certificate revocation list.
#
# Defaults to not using a CRL, uncomment to enable it
#crl_file = "/etc/pki/CA/crl.pem"


#################################################################
#
# Authorization controls
#

# Flag to disable verification of our own server certificates
#
# When libvirtd starts it performs some sanity checks against
# its own certificates.
#
# Default is to always run sanity checks. Uncommenting this
# will disable sanity checks which is not a good idea
#tls_no_sanity_certificate = 1

# Flag to disable verification of client certificates
#
# Client certificate verification is the primary authentication mechanism.
# Any client which does not present a certificate signed by the CA
# will be rejected.
#
# Default is to always verify. Uncommenting this will disable
# verification.
#tls_no_verify_certificate = 1


# An access control list of allowed x509 Distinguished Names
# This list may contain wildcards such as
#
#    "C=GB,ST=London,L=London,O=Red Hat,CN=*"
#
# Any * matches any number of consecutive spaces, like a simplified glob(7).
#
# The format of the DN for a particular certificate can be queried
# using:
#
#   virt-pki-query-dn clientcert.pem
#
# NB If this is an empty list, no client can connect, so comment out
# entirely rather than using empty list to disable these checks
#
# By default, no DNs are checked
#tls_allowed_dn_list = ["DN1", "DN2"]


# Override the compile time default TLS priority string. The
# default is usually "NORMAL" unless overridden at build time.
# Only set this if it is desired for libvirt to deviate from
# the global default settings.
#
#tls_priority="NORMAL"


# An access control list of allowed SASL usernames. The format for username
# depends on the SASL authentication mechanism. Kerberos usernames
# look like username@REALM
#
# This list may contain wildcards such as
#
#    "*@EXAMPLE.COM"
#
# See the g_pattern_match function for the format of the wildcards.
#
# https://developer.gnome.org/glib/stable/glib-Glob-style-pattern-matching.html
#
# NB If this is an empty list, no client can connect, so comment out
# entirely rather than using empty list to disable these checks
#
# By default, no usernames are checked
#sasl_allowed_username_list = ["joe@EXAMPLE.COM", "fred@EXAMPLE.COM" ]

#################################################################
#
# Processing controls
#

# The maximum number of concurrent client connections to allow
# over all sockets combined.
#max_clients = 5000

# The maximum length of queue of connections waiting to be
# accepted by the daemon. Note, that some protocols supporting
# retransmission may obey this so that a later reattempt at
# connection succeeds.
#max_queued_clients = 1000

# The maximum length of queue of accepted but not yet
# authenticated clients. The default value is 20. Set this to
# zero to turn this feature off.
#max_anonymous_clients = 20

# The minimum limit sets the number of workers to start up
# initially. If the number of active clients exceeds this,
# then more threads are spawned, up to max_workers limit.
# Typically you'd want max_workers to equal maximum number
# of clients allowed
#min_workers = 5
#max_workers = 20


# The number of priority workers. If all workers from above
# pool are stuck, some calls marked as high priority
# (notably domainDestroy) can be executed in this pool.
#prio_workers = 5

# Limit on concurrent requests from a single client
# connection. To avoid one client monopolizing the server
# this should be a small fraction of the global max_workers
# parameter.
# Setting this too low may cause keepalive timeouts.
#max_client_requests = 5

# Same processing controls, but this time for the admin interface.
# For a description of each option, be so kind as to scroll a few
# lines upwards.

#admin_min_workers = 1
#admin_max_workers = 5
#admin_max_clients = 5
#admin_max_queued_clients = 5
#admin_max_client_requests = 5

#################################################################
#
# Logging controls
#

# Logging level: 4 errors, 3 warnings, 2 information, 1 debug
# basically 1 will log everything possible
#
# WARNING: USE OF THIS IS STRONGLY DISCOURAGED.
#
# WARNING: It outputs too much information to practically read.
# WARNING: The "log_filters" setting is recommended instead.
#
# WARNING: Journald applies rate limiting of messages and so libvirt
# WARNING: will limit "log_level" to only allow values 3 or 4 if
# WARNING: journald is the current output.
#
# WARNING: USE OF THIS IS STRONGLY DISCOURAGED.
#log_level = 3

# Logging filters:
# A filter allows selecting a different logging level for a given category
# of logs. The format for a filter is:
#
#    level:match
#
# where 'match' is a string which is matched against the category
# given in the VIR_LOG_INIT() at the top of each libvirt source
# file, e.g., "remote", "qemu", or "util.json". The 'match' in the
# filter matches using shell wildcard syntax (see 'man glob(7)').
# The 'match' is always treated as a substring match. IOW a match
# string 'foo' is equivalent to '*foo*'.
#
# 'level' is the minimal level where matching messages should
# be logged:
#
#    1: DEBUG
#    2: INFO
#    3: WARNING
#    4: ERROR
#
# Multiple filters can be defined in a single @log_filters, they just need
# to be separated by spaces. Note that libvirt performs "first" match, i.e.
# if there are concurrent filters, the first one that matches will be applied,
# given the order in @log_filters.
#
# A typical need is to capture information from a hypervisor driver,
# public API entrypoints and some of the utility code. Some utility
# code is very verbose and is generally not desired. Taking the QEMU
# hypervisor as an example, a suitable filter string for debugging
# might be to turn off object, json & event logging, but enable the
# rest of the util code:
#
#log_filters="1:qemu 1:libvirt 4:object 4:json 4:event 1:util"

# Logging outputs:
# An output is one of the places to save logging information
# The format for an output can be:
#    level:stderr
#      output goes to stderr
#    level:syslog:name
#      use syslog for the output and use the given name as the ident
#    level:file:file_path
#      output to a file, with the given filepath
#    level:journald
#      output to journald logging system
# In all cases 'level' is the minimal priority, acting as a filter
#    1: DEBUG
#    2: INFO
#    3: WARNING
#    4: ERROR
#
# Multiple outputs can be defined, they just need to be separated by spaces.
# e.g. to log all warnings and errors to syslog under the libvirtd ident:
#log_outputs="3:syslog:libvirtd"


##################################################################
#
# Auditing
#
# This setting allows usage of the auditing subsystem to be altered:
#
#   audit_level == 0 -> disable all auditing
#   audit_level == 1 -> enable auditing, only if enabled on host (default)
#   audit_level == 2 -> enable auditing, and exit if disabled on host
#
#audit_level = 2
#
# If set to 1, then audit messages will also be sent
# via libvirt logging infrastructure. Defaults to 0
#
#audit_logging = 1

###################################################################
# UUID of the host:
# Host UUID is read from one of the sources specified in host_uuid_source.
#
# - 'smbios': fetch the UUID from 'dmidecode -s system-uuid'
# - 'machine-id': fetch the UUID from /etc/machine-id
#
# The host_uuid_source default is 'smbios'. If 'dmidecode' does not provide
# a valid UUID a temporary UUID will be generated.
#
# Another option is to specify host UUID in host_uuid.
#
# Keep the format of the example UUID below. UUID must not have all digits
# be the same.

# NB This default all-zeros UUID will not work. Replace
# it with the output of the 'uuidgen' command and then
# uncomment this entry
#host_uuid = "00000000-0000-0000-0000-000000000000"
#host_uuid_source = "smbios"

###################################################################
# Keepalive protocol:
# This allows libvirtd to detect broken client connections or even
# dead clients. A keepalive message is sent to a client after
# keepalive_interval seconds of inactivity to check if the client is
# still responding; keepalive_count is a maximum number of keepalive
# messages that are allowed to be sent to the client without getting
# any response before the connection is considered broken. In other
# words, the connection is automatically closed approximately after
# keepalive_interval * (keepalive_count + 1) seconds since the last
# message received from the client. If keepalive_interval is set to
# -1, libvirtd will never send keepalive requests; however clients
# can still send them and the daemon will send responses. When
# keepalive_count is set to 0, connections will be automatically
# closed after keepalive_interval seconds of inactivity without
# sending any keepalive messages.
#
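# (Illustrative arithmetic: with the values below, that is
# 5 * (5 + 1) = 30 seconds of silence before the connection is closed.)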
#keepalive_interval = 5
#keepalive_count = 5

#
# These configuration options are no longer used. There is no way to
# restrict such clients from connecting since they first need to
# connect in order to ask for keepalive.
#
#keepalive_required = 1
#admin_keepalive_required = 1

# Keepalive settings for the admin interface
#admin_keepalive_interval = 5
#admin_keepalive_count = 5

###################################################################
# Open vSwitch:
# This allows specifying a timeout for openvswitch calls made by
# libvirt. The ovs-vsctl utility is used for the configuration and
# its timeout option is set by default to 5 seconds to avoid
# potential infinite waits blocking libvirt.
#
#ovs_timeout = 5
8
salt/libvirt/etc/libvirtd.conf.jinja
Normal file
@@ -0,0 +1,8 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. -#}

{%- for k, v in LIBVIRTMERGED.config.items() %}
{{ k }} = {{ v | json }}
{%- endfor %}
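{# Illustrative rendering against the defaults.yaml shown earlier, e.g.:
     listen_tls = 1
     tls_port = "16514"
     access_drivers = ["polkit"]
   Integers render bare, strings quoted, and lists as JSON arrays via
   the `json` filter. #}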
201
salt/libvirt/images/init.sls
Normal file
@@ -0,0 +1,201 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% if 'vrt' in salt['pillar.get']('features', []) %}

include:
  - hypervisor
  - libvirt.packages

nsm_libvirt_images:
  file.directory:
    - name: /nsm/libvirt/images/sool9
    - dir_mode: 775
    - file_mode: 640
    - recurse:
      - mode
    - makedirs: True

# Remove the hash file if the image isn't present. This allows the image to be
# re-downloaded and initialized.
remove_sha256_sool9:
  file.absent:
    - name: /nsm/libvirt/images/sool9/sool9.sha256
    - unless: test -f /nsm/libvirt/images/sool9/sool9.qcow2

# Manage SHA256 hash file
manage_sha256_sool9:
  file.managed:
    - name: /nsm/libvirt/images/sool9/sool9.sha256
    - source: salt://libvirt/images/sool9/sool9.sha256

# Manage cloud-init files
manage_metadata_sool9:
  file.managed:
    - name: /nsm/libvirt/images/sool9/meta-data
    - source: salt://libvirt/images/sool9/meta-data

manage_userdata_sool9:
  file.managed:
    - name: /nsm/libvirt/images/sool9/user-data
    - source: salt://libvirt/images/sool9/user-data
    - show_changes: False

# Manage qcow2 image
manage_qcow2_sool9:
  file.managed:
    - name: /nsm/libvirt/images/sool9/sool9.qcow2
    - source: salt://libvirt/images/sool9/sool9.qcow2
    - onchanges:
      - file: manage_sha256_sool9
      - file: manage_metadata_sool9
      - file: manage_userdata_sool9

manage_cidata_sool9:
  file.managed:
    - name: /nsm/libvirt/images/sool9/sool9-cidata.iso
    - source: salt://libvirt/images/sool9/sool9-cidata.iso
    - onchanges:
      - file: manage_qcow2_sool9

# Define the storage pool
define_storage_pool_sool9:
  virt.pool_defined:
    - name: sool9
    - ptype: dir
    - target: /nsm/libvirt/images/sool9
    - require:
      - file: manage_metadata_sool9
      - file: manage_userdata_sool9
      - file: manage_cidata_sool9
      - cmd: libvirt_python_module
    - unless:
      - virsh pool-list --all | grep -q sool9

# Set pool autostart
set_pool_autostart_sool9:
  cmd.run:
    - name: virsh pool-autostart sool9
    - require:
      - virt: define_storage_pool_sool9
    - unless:
      - virsh pool-info sool9 | grep -q "Autostart.*yes"

# Start the storage pool
start_storage_pool_sool9:
  cmd.run:
    - name: virsh pool-start sool9
    - require:
      - virt: define_storage_pool_sool9
      - cmd: libvirt_python_module
    - unless:
      - virsh pool-info sool9 | grep -q "State.*running"

# Stop the VM if it is running and the base image files change
stop_vm_sool9:
  module.run:
    - virt.stop:
      - name: sool9
    - onchanges:
      - file: manage_qcow2_sool9
      - file: manage_metadata_sool9
      - file: manage_userdata_sool9
      - file: manage_cidata_sool9
    - require_in:
      - module: undefine_vm_sool9
    - onlyif:
      # Only try to stop if the VM is actually running
      - virsh list --state-running --name | grep -q sool9

undefine_vm_sool9:
  module.run:
    - virt.undefine:
      - vm_: sool9
    - onchanges:
      - file: manage_qcow2_sool9
      - file: manage_metadata_sool9
      - file: manage_userdata_sool9
      - file: manage_cidata_sool9
    # Note: When the VM doesn't exist, you'll see "error: failed to get domain 'sool9'" - this is expected
    # [ERROR ] Command 'virsh' failed with return code: 1
    # [ERROR ] stdout: error: failed to get domain 'sool9'
    - onlyif:
      - virsh dominfo sool9

# Create and start the VM, letting cloud-init run
create_vm_sool9:
  cmd.run:
    - name: |
        virt-install --name sool9 \
          --memory 12288 --vcpus 8 --cpu host-model \
          --disk /nsm/libvirt/images/sool9/sool9.qcow2,format=qcow2,bus=virtio \
          --disk /nsm/libvirt/images/sool9/sool9-cidata.iso,device=cdrom \
          --network bridge=br0,model=virtio \
          --os-variant=ol9.5 \
          --import \
          --noautoconsole
    - require:
      - cmd: start_storage_pool_sool9
      - pkg: install_virt-install
    - onchanges:
      - file: manage_qcow2_sool9
      - file: manage_metadata_sool9
      - file: manage_userdata_sool9
      - file: manage_cidata_sool9
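# Note: --import skips the OS install phase and boots the existing qcow2
# directly; cloud-init then configures the guest on first boot from the
# attached cidata ISO.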

# Wait for cloud-init to complete and the VM to shut down
wait_for_cloud_init_sool9:
  cmd.run:
    - name: /usr/sbin/so-wait-cloud-init -n sool9
    - require:
      - cmd: create_vm_sool9
    - onchanges:
      - cmd: create_vm_sool9
    - timeout: 600

# Configure network predictability after cloud-init
configure_network_predictable_sool9:
  cmd.run:
    - name: /usr/sbin/so-qcow2-network-predictable -n sool9
    - require:
      - cmd: wait_for_cloud_init_sool9
    - onchanges:
      - cmd: create_vm_sool9

# Fire an event here that causes the soc.dyanno.hypervisor state to be applied
base_domain_ready:
  event.send:
    - name: soc/dyanno/hypervisor/baseDomain
    - data:
        status: 'Initialized'
    - require:
      - cmd: configure_network_predictable_sool9
    - onchanges:
      - cmd: create_vm_sool9

{% else %}
{{sls}}_no_license_detected:
  test.fail_without_changes:
    - name: {{sls}}_no_license_detected
    - comment:
      - "Hypervisor nodes are a feature supported only for customers with a valid license.
        Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
        for more information about purchasing a license to enable this feature."
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
1
salt/libvirt/images/sool9/README
Normal file
@@ -0,0 +1 @@
# The files in this directory (/opt/so/saltstack/local/salt/libvirt/images/sool9) are generated by the setup_hypervisor runner. They are then distributed to the hypervisors, where a storage pool is created and the image can be installed.