mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 09:12:45 +01:00
Compare commits
931 Commits
hotfix/2.4
...
30487a54c1
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
30487a54c1 | ||
|
|
55e3a2c6b6 | ||
|
|
ef092e2893 | ||
|
|
89eb95c077 | ||
|
|
e871ec358e | ||
|
|
271a2f74ad | ||
|
|
d6bd951c37 | ||
|
|
6fbed2dd9f | ||
|
|
875de88cb4 | ||
|
|
63bb44886e | ||
|
|
edf3c9464f | ||
|
|
9c06713f32 | ||
|
|
23da0d4ba0 | ||
|
|
d5f2cfb354 | ||
|
|
fb5ad4193d | ||
|
|
1f5f283c06 | ||
|
|
cf048030c4 | ||
|
|
2d716b44a8 | ||
|
|
d70d652310 | ||
|
|
c5db7c8752 | ||
|
|
6f42ff3442 | ||
|
|
433dab7376 | ||
|
|
97c1a46013 | ||
|
|
fbe97221bb | ||
|
|
841ce6b6ec | ||
|
|
dd0b4c3820 | ||
|
|
b407c68d88 | ||
|
|
5b6a7035af | ||
|
|
12d490ad4a | ||
|
|
76cbd18d2c | ||
|
|
a7337c95e1 | ||
|
|
3f7c3326ea | ||
|
|
bf41de8c14 | ||
|
|
136a829509 | ||
|
|
bcec999be4 | ||
|
|
7c73b4713f | ||
|
|
45b4b1d963 | ||
|
|
fcfd74ec1e | ||
|
|
68b0cd7549 | ||
|
|
715d801ce8 | ||
|
|
4a810696e7 | ||
|
|
6b525a2c21 | ||
|
|
a5d8385f07 | ||
|
|
211bf7e77b | ||
|
|
1542b74133 | ||
|
|
4314c79f85 | ||
|
|
da9717bc79 | ||
|
|
045cf7866c | ||
|
|
431e0b0780 | ||
|
|
e782266caa | ||
|
|
a4666b2c08 | ||
|
|
dcc3206e51 | ||
|
|
8358b6ea6f | ||
|
|
d1a66a91c6 | ||
|
|
7fdcb92614 | ||
|
|
cec1890b6b | ||
|
|
b1b66045ea | ||
|
|
33b22bf2e4 | ||
|
|
3a38886345 | ||
|
|
7be70faab6 | ||
|
|
2729fdbea6 | ||
|
|
bfd08d1d2e | ||
|
|
fed75c7b39 | ||
|
|
3427df2a54 | ||
|
|
be11c718f6 | ||
|
|
235dfd78f1 | ||
|
|
7c8b9b4374 | ||
|
|
7762faf075 | ||
|
|
80fbb31372 | ||
|
|
7c45db2295 | ||
|
|
0545e1d33b | ||
|
|
08147e27b0 | ||
|
|
c9153617be | ||
|
|
245ceb2d49 | ||
|
|
4c65975907 | ||
|
|
dfef7036ce | ||
|
|
44594ba726 | ||
|
|
1876c4d9df | ||
|
|
a2ff66b5d0 | ||
|
|
e3972dc5af | ||
|
|
18c0f197b2 | ||
|
|
5b371c220c | ||
|
|
78c193f0a2 | ||
|
|
274295bc97 | ||
|
|
6c7ef622c1 | ||
|
|
da1cac0d53 | ||
|
|
a84df14137 | ||
|
|
4a49f9d004 | ||
|
|
1eb4b5379a | ||
|
|
35c7fc06d7 | ||
|
|
b69d453a68 | ||
|
|
b7e1989d45 | ||
|
|
202b03b32b | ||
|
|
1aa871ec94 | ||
|
|
4ffbb0bbd9 | ||
|
|
f859fe6517 | ||
|
|
021b425b8b | ||
|
|
d95122ca01 | ||
|
|
81d3c7351b | ||
|
|
ccb8ffd6eb | ||
|
|
5a8ea57a1b | ||
|
|
60228ec6e6 | ||
|
|
574703e551 | ||
|
|
fa154f1a8f | ||
|
|
635545630b | ||
|
|
df8afda999 | ||
|
|
f80b090c93 | ||
|
|
806173f7e3 | ||
|
|
2f6c1b82a6 | ||
|
|
b8c2808abe | ||
|
|
9027e4e065 | ||
|
|
8ca5276a0e | ||
|
|
ee45a5524d | ||
|
|
70d4223a75 | ||
|
|
7ab2840381 | ||
|
|
78c951cb70 | ||
|
|
a0a3a80151 | ||
|
|
3ecffd5588 | ||
|
|
8ea66bb0e9 | ||
|
|
9359fbbad6 | ||
|
|
1949be90c2 | ||
|
|
30970acfaf | ||
|
|
6d12a8bfa1 | ||
|
|
2fb41c8d65 | ||
|
|
835b2609b6 | ||
|
|
10ae53f108 | ||
|
|
68bfceb727 | ||
|
|
f348c7168f | ||
|
|
627d9bf45d | ||
|
|
2aee8ab511 | ||
|
|
de9d3c9726 | ||
|
|
39572f36f4 | ||
|
|
0994cd515a | ||
|
|
bdcd1e099d | ||
|
|
c64760b5f4 | ||
|
|
d2aa60b961 | ||
|
|
83d615d236 | ||
|
|
e910de0a06 | ||
|
|
26b80aba38 | ||
|
|
ee617eeff4 | ||
|
|
463766782c | ||
|
|
d9f70898dd | ||
|
|
7e15c89510 | ||
|
|
ed5bd19f0e | ||
|
|
feba97738f | ||
|
|
348809bdbb | ||
|
|
ca0edb1cab | ||
|
|
0172f64f15 | ||
|
|
48f8944e3b | ||
|
|
3e22043ea6 | ||
|
|
e572b854b9 | ||
|
|
c8aad2b03b | ||
|
|
8773ebc3dc | ||
|
|
2baf2478da | ||
|
|
378d37d74e | ||
|
|
f8c8e5d8e5 | ||
|
|
dca38c286a | ||
|
|
860710f5f9 | ||
|
|
d56af4acab | ||
|
|
793e98f75c | ||
|
|
f9c5aa3fef | ||
|
|
254e782da6 | ||
|
|
fe3caf66a1 | ||
|
|
09d699432a | ||
|
|
79b44586ce | ||
|
|
feddd90e41 | ||
|
|
ca935e4272 | ||
|
|
8f75bfb0a4 | ||
|
|
e551c6e037 | ||
|
|
1c5a72ee85 | ||
|
|
8a8ea04088 | ||
|
|
92be8df95d | ||
|
|
f730e23e30 | ||
|
|
a3e7649a3c | ||
|
|
af42c31740 | ||
|
|
a22c9f6bcf | ||
|
|
bad9a16ebb | ||
|
|
7827e05c24 | ||
|
|
e45b0bf871 | ||
|
|
659c039ba8 | ||
|
|
c7edaac42a | ||
|
|
a1a8f75409 | ||
|
|
23e25fa2d7 | ||
|
|
f077484121 | ||
|
|
c16bf50493 | ||
|
|
564374a8fb | ||
|
|
4ab4264f77 | ||
|
|
60cccb21b4 | ||
|
|
39432198cc | ||
|
|
7af95317db | ||
|
|
8675193d1f | ||
|
|
ac0d6c57e1 | ||
|
|
3db6542398 | ||
|
|
9fd1b9aec1 | ||
|
|
e5563eb9b8 | ||
|
|
e8de9e3c26 | ||
|
|
c8a3603577 | ||
|
|
05321cf1ed | ||
|
|
7deef44ff6 | ||
|
|
9752d61699 | ||
|
|
6b8e2e2643 | ||
|
|
b1acbf3114 | ||
|
|
e3ac1dd1b4 | ||
|
|
86eca53d4b | ||
|
|
bfd3d822b1 | ||
|
|
030e4961d7 | ||
|
|
14bd92067b | ||
|
|
066e227325 | ||
|
|
f1cfb9cd91 | ||
|
|
5a2e704909 | ||
|
|
f04e54d1d5 | ||
|
|
e9af46a8cb | ||
|
|
b4b051908b | ||
|
|
0148e5638c | ||
|
|
c8814d0632 | ||
|
|
6c892fed78 | ||
|
|
8043e09ec1 | ||
|
|
e775299480 | ||
|
|
c4ca9c62aa | ||
|
|
c37aeff364 | ||
|
|
cdac49052f | ||
|
|
8e5fa9576c | ||
|
|
25c746bb14 | ||
|
|
cd04d1e5a7 | ||
|
|
1fb558cc77 | ||
|
|
7f1b76912c | ||
|
|
3a2ceb0b6f | ||
|
|
1345756fce | ||
|
|
d81d9a0722 | ||
|
|
55074fda69 | ||
|
|
23e12811a1 | ||
|
|
5d1edf6d86 | ||
|
|
a91e8b26f6 | ||
|
|
c836dd2acd | ||
|
|
e826ea5d04 | ||
|
|
3a87af805f | ||
|
|
328ac329ec | ||
|
|
a3401aad11 | ||
|
|
5a67b89a80 | ||
|
|
431f71cc82 | ||
|
|
23a9780ebb | ||
|
|
4587301cca | ||
|
|
9cb8ebbaa7 | ||
|
|
14ddbd32ad | ||
|
|
4599b95ae7 | ||
|
|
c92dc580a2 | ||
|
|
4666aa9818 | ||
|
|
f066baf6ba | ||
|
|
ba710c9944 | ||
|
|
198695af03 | ||
|
|
fec78f5fb5 | ||
|
|
d03dd7ac2d | ||
|
|
d2dd52b42a | ||
|
|
c9db52433f | ||
|
|
138849d258 | ||
|
|
a9ec12e402 | ||
|
|
87281efc24 | ||
|
|
29ac4f23c6 | ||
|
|
878a3f8962 | ||
|
|
21e27bce87 | ||
|
|
336ca0dbbd | ||
|
|
d9eba3cd0e | ||
|
|
81b7e2b420 | ||
|
|
cd5483623b | ||
|
|
faa112eddf | ||
|
|
f663f22628 | ||
|
|
8b07ff453d | ||
|
|
24a0fa3f6d | ||
|
|
a5011b398d | ||
|
|
5b70398c0a | ||
|
|
f3aaee1e41 | ||
|
|
d0e875928d | ||
|
|
3e16bc8335 | ||
|
|
c1d85493df | ||
|
|
e01d0f81ea | ||
|
|
376d0f3295 | ||
|
|
4418623f73 | ||
|
|
d1f4e26e29 | ||
|
|
5166db1caa | ||
|
|
ff5ad586af | ||
|
|
9e24d21282 | ||
|
|
5806999f63 | ||
|
|
4dae1afe0b | ||
|
|
456cad1ada | ||
|
|
063a2b3348 | ||
|
|
bcd2e95fbe | ||
|
|
94e8cd84e6 | ||
|
|
948d72c282 | ||
|
|
bdeb92ab05 | ||
|
|
fdb5ad810a | ||
|
|
f588a80ec7 | ||
|
|
562b7e54cb | ||
|
|
3c847bca8b | ||
|
|
ce2cc26224 | ||
|
|
f3c574679c | ||
|
|
5da3fed1ce | ||
|
|
e6bcf5db6b | ||
|
|
4d24c57903 | ||
|
|
0606c0a454 | ||
|
|
bb984e05e3 | ||
|
|
b35b0aaf2c | ||
|
|
62f04fa5dd | ||
|
|
d89df5f0dd | ||
|
|
f0c1922600 | ||
|
|
ab2cdd18ed | ||
|
|
889bb7ddf4 | ||
|
|
a959f90d0b | ||
|
|
a54cd004d6 | ||
|
|
5100032fbd | ||
|
|
0f235baa7e | ||
|
|
e5660b8c8e | ||
|
|
588a1b86d1 | ||
|
|
46f0afa24b | ||
|
|
a7651b2734 | ||
|
|
890f76e45c | ||
|
|
03892bad5e | ||
|
|
e6eecc93c8 | ||
|
|
8dc0f8d20e | ||
|
|
fbdc0c4705 | ||
|
|
d1a2b57aa2 | ||
|
|
f5ec1d4b7c | ||
|
|
0aa556e375 | ||
|
|
d9e86c15bc | ||
|
|
4107fa006f | ||
|
|
29980ea958 | ||
|
|
8f36d2ec00 | ||
|
|
10511b8431 | ||
|
|
2535ae953d | ||
|
|
2f68cd7483 | ||
|
|
6655276410 | ||
|
|
9f7bcb0f7d | ||
|
|
aa43177d8c | ||
|
|
12959d114c | ||
|
|
855b489c4b | ||
|
|
673f9cb544 | ||
|
|
0a3ff47008 | ||
|
|
834e34128d | ||
|
|
73776f8d11 | ||
|
|
120e61e45c | ||
|
|
fc2d450de0 | ||
|
|
cea4eaf081 | ||
|
|
b1753f86f9 | ||
|
|
6323fbf46b | ||
|
|
ba601c39b3 | ||
|
|
ec27517bdd | ||
|
|
624ec3c93e | ||
|
|
f318a84c18 | ||
|
|
8cca58dba9 | ||
|
|
6c196ea61a | ||
|
|
207572f2f9 | ||
|
|
4afc986f48 | ||
|
|
ba5d140d4b | ||
|
|
348f9dcaec | ||
|
|
915b9e7bd7 | ||
|
|
dfec29d18e | ||
|
|
77fef02116 | ||
|
|
38ef4a6046 | ||
|
|
f3328c41fb | ||
|
|
a007fa6505 | ||
|
|
1a32a0897c | ||
|
|
e26310d172 | ||
|
|
c7cdb0b466 | ||
|
|
df0b484b45 | ||
|
|
2181cddf49 | ||
|
|
a2b6968cef | ||
|
|
285fbc2783 | ||
|
|
94c5a1fd98 | ||
|
|
19362fe5e5 | ||
|
|
a7a81e9825 | ||
|
|
31484d1158 | ||
|
|
f51cd008f2 | ||
|
|
a5675a79fe | ||
|
|
1ea7b3c09f | ||
|
|
d9127a288f | ||
|
|
23ae259c82 | ||
|
|
ebb78bc9bd | ||
|
|
e5920b6465 | ||
|
|
153a99a002 | ||
|
|
69a5e1e2f5 | ||
|
|
0858160be2 | ||
|
|
ccd79c814d | ||
|
|
45f25ca62d | ||
|
|
a8a01b8191 | ||
|
|
ac2c044a94 | ||
|
|
e10d00d114 | ||
|
|
cbdd369a18 | ||
|
|
b2e7f58b3d | ||
|
|
a6600b8762 | ||
|
|
5479d49379 | ||
|
|
304985b61e | ||
|
|
d6c725299b | ||
|
|
d99857002d | ||
|
|
2a6c74917e | ||
|
|
9f0bd4bad3 | ||
|
|
924b06976c | ||
|
|
1357f19e48 | ||
|
|
c91e9ea4e0 | ||
|
|
c2c96dad6e | ||
|
|
1a08833e77 | ||
|
|
d16dfcf4e8 | ||
|
|
b79c7b0540 | ||
|
|
9f45792217 | ||
|
|
d3108c3549 | ||
|
|
7d883cb5e0 | ||
|
|
ebd81c1df9 | ||
|
|
418dbee9fa | ||
|
|
cccc3bf625 | ||
|
|
a3e0072631 | ||
|
|
220e485312 | ||
|
|
67f8fca043 | ||
|
|
0e0ab8384c | ||
|
|
58228f70ca | ||
|
|
7968de06b4 | ||
|
|
87fdd90f56 | ||
|
|
65e7e56fbe | ||
|
|
424fdff934 | ||
|
|
f72996d9d1 | ||
|
|
d77556c672 | ||
|
|
c412e9bad2 | ||
|
|
87a28e8ce7 | ||
|
|
9ca0c7d53a | ||
|
|
2e94e452ed | ||
|
|
6a0d40ee0d | ||
|
|
0cebcf4432 | ||
|
|
ed0e24fcaf | ||
|
|
24be2f869b | ||
|
|
f8058a4a3a | ||
|
|
d0ba6df2fc | ||
|
|
95bee91b12 | ||
|
|
751b5bd556 | ||
|
|
77273449c9 | ||
|
|
46e1f1bc5c | ||
|
|
884bec7465 | ||
|
|
8d3220f94b | ||
|
|
9cb42911dc | ||
|
|
a3cc6f025e | ||
|
|
6fae4a9974 | ||
|
|
f7a1a3a172 | ||
|
|
292e1ad782 | ||
|
|
af1fe86586 | ||
|
|
97100cdfdd | ||
|
|
5f60ef1541 | ||
|
|
c7e7a0a871 | ||
|
|
f09eff530e | ||
|
|
50b34a116a | ||
|
|
42874fb0d0 | ||
|
|
482847187c | ||
|
|
a19b99268d | ||
|
|
3c5a03d7b6 | ||
|
|
c1a5c2b2d1 | ||
|
|
baf0f7ba95 | ||
|
|
ee27965314 | ||
|
|
d02093295b | ||
|
|
6381444fdc | ||
|
|
01b313868d | ||
|
|
3859ebd69c | ||
|
|
9753e431e3 | ||
|
|
b307667ae2 | ||
|
|
5d7dcbbcee | ||
|
|
281b395053 | ||
|
|
3518f39d39 | ||
|
|
ae0ffc4977 | ||
|
|
bc2f716c99 | ||
|
|
9617da1791 | ||
|
|
2ba5d7d64b | ||
|
|
437b9016ca | ||
|
|
c5db0a7195 | ||
|
|
82894d88b6 | ||
|
|
4a4146f515 | ||
|
|
59a4d0129f | ||
|
|
5cf2149218 | ||
|
|
453c32df0d | ||
|
|
1df10b80b2 | ||
|
|
9d96a11753 | ||
|
|
e9e3252bb5 | ||
|
|
930c8147e7 | ||
|
|
378ecad94c | ||
|
|
02299a6742 | ||
|
|
15cbc626c4 | ||
|
|
8720a4540a | ||
|
|
7b5980bfe5 | ||
|
|
ebfb670f6a | ||
|
|
c98042fa80 | ||
|
|
70181e3e08 | ||
|
|
adb1e01c7a | ||
|
|
cdb7f0602c | ||
|
|
d52e817dd5 | ||
|
|
07305d8799 | ||
|
|
fbf5bafae7 | ||
|
|
d49cd3cb85 | ||
|
|
b60b9e7743 | ||
|
|
26fd8562c5 | ||
|
|
84b38daf62 | ||
|
|
a0f9d5dc61 | ||
|
|
e8c25d157f | ||
|
|
214f4f0f0c | ||
|
|
7ae0369a3b | ||
|
|
2e5682f11c | ||
|
|
2e7cb0e362 | ||
|
|
56748ea6e7 | ||
|
|
621f03994c | ||
|
|
ab8ad72920 | ||
|
|
3fc244ee85 | ||
|
|
4728b96c51 | ||
|
|
f303363a73 | ||
|
|
2a166af524 | ||
|
|
ab4d055fd1 | ||
|
|
af49a8e4ef | ||
|
|
669d219fdc | ||
|
|
442aecb9f4 | ||
|
|
beda0bc89c | ||
|
|
64fd6bf979 | ||
|
|
1955434416 | ||
|
|
ab6a083fa8 | ||
|
|
eabca5df18 | ||
|
|
5dac3ff2a6 | ||
|
|
93024738d3 | ||
|
|
05a368681a | ||
|
|
246161018c | ||
|
|
f27714890a | ||
|
|
47831eb300 | ||
|
|
0b1f2252ee | ||
|
|
3ce6b555f7 | ||
|
|
c29f11863e | ||
|
|
952403b696 | ||
|
|
b3eb06f53e | ||
|
|
5198d0cdf0 | ||
|
|
e61e2f04b3 | ||
|
|
1aa876f4eb | ||
|
|
a3fb2f13be | ||
|
|
9e77eae71e | ||
|
|
cd5de5cd05 | ||
|
|
98a67530f5 | ||
|
|
58ffe576d7 | ||
|
|
b0a515f2c3 | ||
|
|
a037421809 | ||
|
|
6bb6c24641 | ||
|
|
617834a044 | ||
|
|
2c5c0e7830 | ||
|
|
81d2c52867 | ||
|
|
4f8bd16910 | ||
|
|
ab9d03bc2e | ||
|
|
10bf3e8fab | ||
|
|
f8108e93d5 | ||
|
|
3108556495 | ||
|
|
f97b2444e7 | ||
|
|
415f456661 | ||
|
|
e49b3fc260 | ||
|
|
9b125fbe53 | ||
|
|
10e3b32fed | ||
|
|
5386c07b66 | ||
|
|
7149d20b42 | ||
|
|
8a57b79b77 | ||
|
|
a4e8e7ea53 | ||
|
|
95ba327eb3 | ||
|
|
3056410fd1 | ||
|
|
bf8da60605 | ||
|
|
226f858866 | ||
|
|
317d7dea7d | ||
|
|
4e548ceb6e | ||
|
|
d846fe55e1 | ||
|
|
3b2942651e | ||
|
|
fa6f4100dd | ||
|
|
33e2d18aa7 | ||
|
|
a03764d956 | ||
|
|
3fb703cd22 | ||
|
|
f1cbe23f57 | ||
|
|
07a22a0b4b | ||
|
|
b9d813cef2 | ||
|
|
76ab0eac03 | ||
|
|
08a2ad2c40 | ||
|
|
47bbc9987e | ||
|
|
59628ec8b7 | ||
|
|
bef2fa9e8d | ||
|
|
d4f0cbcb67 | ||
|
|
9e96b12e94 | ||
|
|
42552810fb | ||
|
|
4bf2c931e9 | ||
|
|
beda6ac20d | ||
|
|
d8be6e42e1 | ||
|
|
4fb7fe9e45 | ||
|
|
6d7066c381 | ||
|
|
d003e1380f | ||
|
|
ef8badaef1 | ||
|
|
dea9c149d7 | ||
|
|
56c9fa3129 | ||
|
|
a86105294b | ||
|
|
33c23c30d3 | ||
|
|
fe76a79ebd | ||
|
|
5035ec2539 | ||
|
|
9f35b20664 | ||
|
|
b93c6c0270 | ||
|
|
e5dd403dd1 | ||
|
|
493359e5a2 | ||
|
|
b0f5218775 | ||
|
|
8fdc7049f9 | ||
|
|
d79d7e2ba1 | ||
|
|
596b3e2614 | ||
|
|
59f8544324 | ||
|
|
daaad3699c | ||
|
|
1e9f3a65a4 | ||
|
|
b2acf2f807 | ||
|
|
34e561f358 | ||
|
|
e5a07170b3 | ||
|
|
02dbbc5289 | ||
|
|
5e62d3ecb2 | ||
|
|
373ef9fe91 | ||
|
|
2f1e6fd625 | ||
|
|
6b8ef43cc1 | ||
|
|
7e746b87c5 | ||
|
|
2ad2a3110c | ||
|
|
bc24a6c574 | ||
|
|
b25bb0faf0 | ||
|
|
38c74b46b6 | ||
|
|
fbb6d8146a | ||
|
|
83ecc02589 | ||
|
|
21d9964827 | ||
|
|
f3b6d9febb | ||
|
|
b052a75e64 | ||
|
|
0602601655 | ||
|
|
480e248131 | ||
|
|
6fc7c930a6 | ||
|
|
31cd5b1365 | ||
|
|
19fb081fa0 | ||
|
|
d3b1a4f928 | ||
|
|
4729e194a0 | ||
|
|
ab6060c484 | ||
|
|
0b65021f75 | ||
|
|
bd4f2093db | ||
|
|
48dfcab9f0 | ||
|
|
849f8f13bc | ||
|
|
07359ad6ec | ||
|
|
1e2453eddf | ||
|
|
4c9773c68d | ||
|
|
4666670f4f | ||
|
|
0f71b45e0f | ||
|
|
92e9bd43ca | ||
|
|
a600c64229 | ||
|
|
121dec0180 | ||
|
|
b451c4c034 | ||
|
|
dbdbffa4b0 | ||
|
|
f360c6ecbc | ||
|
|
b9ea151846 | ||
|
|
b428573a0a | ||
|
|
350e1c9d91 | ||
|
|
a3b5db5945 | ||
|
|
3efe0eac13 | ||
|
|
aca54b4645 | ||
|
|
643afeeae7 | ||
|
|
d9fb79403b | ||
|
|
2ef89be67d | ||
|
|
43e994f2c2 | ||
|
|
ab89858d04 | ||
|
|
395c4e37ba | ||
|
|
3da2c7cabc | ||
|
|
832d66052e | ||
|
|
add538f6dd | ||
|
|
fc9107f129 | ||
|
|
d9790b04f6 | ||
|
|
88fa04b0f6 | ||
|
|
d240fca721 | ||
|
|
4d6171bde6 | ||
|
|
6238a5b3ed | ||
|
|
061600fa7a | ||
|
|
1b89cc6818 | ||
|
|
6e1e617124 | ||
|
|
7f8bf850a2 | ||
|
|
0277891392 | ||
|
|
08d99a3890 | ||
|
|
773606d876 | ||
|
|
bf38055a6c | ||
|
|
90b8d6b2f7 | ||
|
|
2d78fa1a41 | ||
|
|
45d541d4f2 | ||
|
|
b3c48674c5 | ||
|
|
8d42739030 | ||
|
|
27358137f2 | ||
|
|
a54b9ddbe4 | ||
|
|
58936b31d5 | ||
|
|
fcdacc3b0d | ||
|
|
40531dd919 | ||
|
|
05dfce62fb | ||
|
|
9df9cc2247 | ||
|
|
d3ee5ed7b8 | ||
|
|
502e1e1f1b | ||
|
|
e5b12ecdb9 | ||
|
|
be5e41227f | ||
|
|
08f208cd38 | ||
|
|
db08ac9022 | ||
|
|
ad5a27f991 | ||
|
|
07ec302267 | ||
|
|
18d899a7f9 | ||
|
|
b2650da057 | ||
|
|
31df0b5d7d | ||
|
|
a430a47a30 | ||
|
|
2e8ab648fd | ||
|
|
b753d40861 | ||
|
|
a32aac7111 | ||
|
|
2fff6232c1 | ||
|
|
f751c82e1c | ||
|
|
39f74fe547 | ||
|
|
11fb33fdeb | ||
|
|
58f4db95ea | ||
|
|
b55cb257b6 | ||
|
|
b0a8191f59 | ||
|
|
28aedcf50b | ||
|
|
6988f03ebc | ||
|
|
2948577b0e | ||
|
|
870a9ff80c | ||
|
|
689db57f5f | ||
|
|
2768722132 | ||
|
|
df103b3dca | ||
|
|
0542c77137 | ||
|
|
9022dc24fb | ||
|
|
78b7068638 | ||
|
|
70339b9a94 | ||
|
|
5c8460fd26 | ||
|
|
69e90e1e70 | ||
|
|
8c5ea19d3c | ||
|
|
82562f89f6 | ||
|
|
ede36b5ef8 | ||
|
|
fd00a4db85 | ||
|
|
510c7a0c19 | ||
|
|
9e0f13cce5 | ||
|
|
8c37a4454c | ||
|
|
ef436026d5 | ||
|
|
a595bc4b31 | ||
|
|
a167e5e520 | ||
|
|
26d7ceebb2 | ||
|
|
e5c0f8a46c | ||
|
|
5965459423 | ||
|
|
3a31d80a85 | ||
|
|
5a8e542f96 | ||
|
|
7a60afdd5a | ||
|
|
c3b3e0ab21 | ||
|
|
6246e25fbe | ||
|
|
102ddaf262 | ||
|
|
151db2af30 | ||
|
|
b2bd8577b9 | ||
|
|
4df3070a1d | ||
|
|
142609ea67 | ||
|
|
ed80c4e13b | ||
|
|
285d73d526 | ||
|
|
0bcb6040c9 | ||
|
|
07ef3d632c | ||
|
|
21bb325157 | ||
|
|
888ab162bd | ||
|
|
8ab38956d1 | ||
|
|
0f120f7500 | ||
|
|
f6a0e62853 | ||
|
|
cc0e91aa96 | ||
|
|
bf9f92b04e | ||
|
|
8f3664f26c | ||
|
|
445afca6ee | ||
|
|
3083e3bc63 | ||
|
|
9e16c03d25 | ||
|
|
b22fe5bd3d | ||
|
|
a60e55e5cd | ||
|
|
e7aa4428de | ||
|
|
64f71143dc | ||
|
|
7aad298720 | ||
|
|
4165b33995 | ||
|
|
f9bf4e4130 | ||
|
|
269919b980 | ||
|
|
2dc977ddd8 | ||
|
|
28c7362cfa | ||
|
|
c93a5de460 | ||
|
|
44a5b3b1e5 | ||
|
|
ae94722eda | ||
|
|
ae993c47c1 | ||
|
|
c784a6e440 | ||
|
|
c66cd3b2f3 | ||
|
|
f30938ed59 | ||
|
|
6c472dd383 | ||
|
|
2c5861a0c2 | ||
|
|
8047e196fe | ||
|
|
c6c979dc19 | ||
|
|
c8a1c8377a | ||
|
|
4e954c24f7 | ||
|
|
52839e2a7d | ||
|
|
1a9d5f151f | ||
|
|
d6f527881a | ||
|
|
5811b184be | ||
|
|
e0a3b51ca2 | ||
|
|
b5276a6a1d | ||
|
|
cc1b030c00 | ||
|
|
c896785480 | ||
|
|
0006948c29 | ||
|
|
6ac14f832e | ||
|
|
fd9a4966ec | ||
|
|
3246176c0a | ||
|
|
b68f561e6f | ||
|
|
8ffd4fc664 | ||
|
|
f46548ed88 | ||
|
|
0d335e3056 | ||
|
|
6ff701bd5c | ||
|
|
c34be5313d | ||
|
|
ec2fc0a5f2 | ||
|
|
ad54afe39a | ||
|
|
eb4cd75218 | ||
|
|
a84f5a1e32 | ||
|
|
e193347fb4 | ||
|
|
ad27c8674b | ||
|
|
5123a86062 | ||
|
|
010c205eec | ||
|
|
160c84ec1a | ||
|
|
924c0b63bd | ||
|
|
9b8dce0c77 | ||
|
|
7159678385 | ||
|
|
c8e232c598 | ||
|
|
a3013ff85b | ||
|
|
65c5abfa88 | ||
|
|
0114e36cfa | ||
|
|
5c56e0f498 | ||
|
|
61992ae787 | ||
|
|
08bbeedbd7 | ||
|
|
a5f2db8c80 | ||
|
|
8d1ce0460f | ||
|
|
3c85b48291 | ||
|
|
ea2e026c56 | ||
|
|
8b3f310212 | ||
|
|
87136e9e2b | ||
|
|
5a6a9d6ec2 | ||
|
|
d3b3a0eb8a | ||
|
|
91fc59cffc | ||
|
|
e32dbad0d0 | ||
|
|
b66aafd168 | ||
|
|
2cd0f69069 | ||
|
|
0177f641c8 | ||
|
|
b3969a6ce0 | ||
|
|
ab97d3b8b7 | ||
|
|
213df68d04 | ||
|
|
9db3cd901c | ||
|
|
64c9230423 | ||
|
|
17943ef0db | ||
|
|
8ed3f0b1cc | ||
|
|
7c50a5e17b | ||
|
|
c13c85bd2d | ||
|
|
ae01dc9639 | ||
|
|
a74ed0daf0 | ||
|
|
60387651d2 | ||
|
|
3a78be68d6 | ||
|
|
a896332db3 | ||
|
|
54eeb0e327 | ||
|
|
1f13554bd9 | ||
|
|
4cc3691489 | ||
|
|
24eadf2507 | ||
|
|
a274bfb744 | ||
|
|
2277c792b9 | ||
|
|
61f5614ac9 | ||
|
|
6367aed62a | ||
|
|
739f592061 | ||
|
|
116c2b73c1 | ||
|
|
58be7ae5db | ||
|
|
0e0fb885d2 | ||
|
|
e8546b82f8 | ||
|
|
837fbab96d | ||
|
|
cbd2d88000 | ||
|
|
01ac1cdcca | ||
|
|
161e8a6c21 | ||
|
|
2e3c1adc63 | ||
|
|
776afa4a36 | ||
|
|
3cac19d498 | ||
|
|
2ba8a87c9d | ||
|
|
d677dc51de | ||
|
|
ebbfcd169c | ||
|
|
574d2994d1 | ||
|
|
ecc5d64584 | ||
|
|
6888682f92 | ||
|
|
0197cdb33d | ||
|
|
3c59858f70 | ||
|
|
6f0161e9da | ||
|
|
f2bd735f51 | ||
|
|
7a8fd8c3e5 | ||
|
|
b24aa2f797 | ||
|
|
5e4f1fc279 | ||
|
|
e779d180f9 | ||
|
|
a84a32c075 | ||
|
|
5649986834 | ||
|
|
7eaa8d54dc | ||
|
|
61a1fbde6e | ||
|
|
a0a18973d8 | ||
|
|
efbf62f56a | ||
|
|
39391c8088 | ||
|
|
9ac5ef09ad | ||
|
|
3394588602 | ||
|
|
c64a05f2ff | ||
|
|
0c4426a55e | ||
|
|
feb700393e | ||
|
|
0476585370 | ||
|
|
dcc1738978 | ||
|
|
0b0ff62bc5 | ||
|
|
9f76371449 | ||
|
|
50bd8448cc | ||
|
|
0b326370bd | ||
|
|
d0963baad4 | ||
|
|
75e8c60fe2 | ||
|
|
e7ea27a1b3 | ||
|
|
aaa48f6a1a | ||
|
|
0766a5da91 | ||
|
|
267d1a27ac | ||
|
|
f5e6e49075 | ||
|
|
d44ce0a070 | ||
|
|
9ddccba780 | ||
|
|
301894f6e8 | ||
|
|
a425a7fda2 | ||
|
|
21c3835322 | ||
|
|
d110503639 | ||
|
|
64bf7eb363 | ||
|
|
205560cc95 | ||
|
|
7698243caf | ||
|
|
67f0934930 | ||
|
|
30e998edf7 | ||
|
|
2a35e45920 | ||
|
|
aa5de9f7bd | ||
|
|
f9eeb76518 | ||
|
|
957235a656 | ||
|
|
64a0c171f3 | ||
|
|
a28ac3bee6 | ||
|
|
3643303a51 | ||
|
|
81d407f0ff | ||
|
|
d29b0660f0 | ||
|
|
59b94177d6 | ||
|
|
9d2c5d54b0 | ||
|
|
a6f1a0245a | ||
|
|
fcf859ffed | ||
|
|
fe3f87e1fd | ||
|
|
5a24a7775e | ||
|
|
52e52f35f7 | ||
|
|
810be2c9d2 | ||
|
|
8e4777a5ff |
3
.github/.gitleaks.toml
vendored
3
.github/.gitleaks.toml
vendored
@@ -541,5 +541,6 @@ paths = [
|
|||||||
'''gitleaks.toml''',
|
'''gitleaks.toml''',
|
||||||
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
|
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
|
||||||
'''(go.mod|go.sum)$''',
|
'''(go.mod|go.sum)$''',
|
||||||
'''salt/nginx/files/enterprise-attack.json'''
|
'''salt/nginx/files/enterprise-attack.json''',
|
||||||
|
'''(.*?)whl$'''
|
||||||
]
|
]
|
||||||
|
|||||||
5
.github/DISCUSSION_TEMPLATE/2-4.yml
vendored
5
.github/DISCUSSION_TEMPLATE/2-4.yml
vendored
@@ -28,6 +28,11 @@ body:
|
|||||||
- 2.4.140
|
- 2.4.140
|
||||||
- 2.4.141
|
- 2.4.141
|
||||||
- 2.4.150
|
- 2.4.150
|
||||||
|
- 2.4.160
|
||||||
|
- 2.4.170
|
||||||
|
- 2.4.180
|
||||||
|
- 2.4.190
|
||||||
|
- 2.4.200
|
||||||
- Other (please provide detail below)
|
- Other (please provide detail below)
|
||||||
validations:
|
validations:
|
||||||
required: true
|
required: true
|
||||||
|
|||||||
2
.github/workflows/pythontest.yml
vendored
2
.github/workflows/pythontest.yml
vendored
@@ -4,7 +4,7 @@ on:
|
|||||||
pull_request:
|
pull_request:
|
||||||
paths:
|
paths:
|
||||||
- "salt/sensoroni/files/analyzers/**"
|
- "salt/sensoroni/files/analyzers/**"
|
||||||
- "salt/manager/tools/sbin"
|
- "salt/manager/tools/sbin/**"
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
build:
|
build:
|
||||||
|
|||||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,4 +1,3 @@
|
|||||||
|
|
||||||
# Created by https://www.gitignore.io/api/macos,windows
|
# Created by https://www.gitignore.io/api/macos,windows
|
||||||
# Edit at https://www.gitignore.io/?templates=macos,windows
|
# Edit at https://www.gitignore.io/?templates=macos,windows
|
||||||
|
|
||||||
@@ -67,4 +66,4 @@ __pycache__
|
|||||||
|
|
||||||
# Analyzer dev/test config files
|
# Analyzer dev/test config files
|
||||||
*_dev.yaml
|
*_dev.yaml
|
||||||
site-packages
|
site-packages
|
||||||
|
|||||||
@@ -1,17 +1,17 @@
|
|||||||
### 2.4.150-20250522 ISO image released on 2025/05/22
|
### 2.4.190-20251024 ISO image released on 2025/10/24
|
||||||
|
|
||||||
|
|
||||||
### Download and Verify
|
### Download and Verify
|
||||||
|
|
||||||
2.4.150-20250522 ISO image:
|
2.4.190-20251024 ISO image:
|
||||||
https://download.securityonion.net/file/securityonion/securityonion-2.4.150-20250522.iso
|
https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
|
||||||
|
|
||||||
MD5: 239E69B83072BBF2602D4043FE53A160
|
MD5: 25358481FB876226499C011FC0710358
|
||||||
SHA1: C62893D3C7F5592665BFDCBC9A45BB20A926F9A8
|
SHA1: 0B26173C0CE136F2CA40A15046D1DFB78BCA1165
|
||||||
SHA256: 2ADE037C7FD34591030B1FAC10392C4E6613F152DD24BFBD897E57EE300895B9
|
SHA256: 4FD9F62EDA672408828B3C0C446FE5EA9FF3C4EE8488A7AB1101544A3C487872
|
||||||
|
|
||||||
Signature for ISO image:
|
Signature for ISO image:
|
||||||
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.150-20250522.iso.sig
|
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
|
||||||
|
|
||||||
Signing key:
|
Signing key:
|
||||||
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
|
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
|
||||||
@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
|
|||||||
|
|
||||||
Download the signature file for the ISO:
|
Download the signature file for the ISO:
|
||||||
```
|
```
|
||||||
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.150-20250522.iso.sig
|
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.190-20251024.iso.sig
|
||||||
```
|
```
|
||||||
|
|
||||||
Download the ISO image:
|
Download the ISO image:
|
||||||
```
|
```
|
||||||
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.150-20250522.iso
|
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.190-20251024.iso
|
||||||
```
|
```
|
||||||
|
|
||||||
Verify the downloaded ISO image using the signature file:
|
Verify the downloaded ISO image using the signature file:
|
||||||
```
|
```
|
||||||
gpg --verify securityonion-2.4.150-20250522.iso.sig securityonion-2.4.150-20250522.iso
|
gpg --verify securityonion-2.4.190-20251024.iso.sig securityonion-2.4.190-20251024.iso
|
||||||
```
|
```
|
||||||
|
|
||||||
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
|
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
|
||||||
```
|
```
|
||||||
gpg: Signature made Thu 22 May 2025 11:15:06 AM EDT using RSA key ID FE507013
|
gpg: Signature made Thu 23 Oct 2025 07:21:46 AM EDT using RSA key ID FE507013
|
||||||
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
|
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
|
||||||
gpg: WARNING: This key is not certified with a trusted signature!
|
gpg: WARNING: This key is not certified with a trusted signature!
|
||||||
gpg: There is no indication that the signature belongs to the owner.
|
gpg: There is no indication that the signature belongs to the owner.
|
||||||
|
|||||||
34
pillar/hypervisor/nodes.sls
Normal file
34
pillar/hypervisor/nodes.sls
Normal file
@@ -0,0 +1,34 @@
|
|||||||
|
{% set node_types = {} %}
|
||||||
|
{% for minionid, ip in salt.saltutil.runner(
|
||||||
|
'mine.get',
|
||||||
|
tgt='G@role:so-hypervisor or G@role:so-managerhype',
|
||||||
|
fun='network.ip_addrs',
|
||||||
|
tgt_type='compound') | dictsort()
|
||||||
|
%}
|
||||||
|
|
||||||
|
# only add a node to the pillar if it returned an ip from the mine
|
||||||
|
{% if ip | length > 0%}
|
||||||
|
{% set hostname = minionid.split('_') | first %}
|
||||||
|
{% set node_type = minionid.split('_') | last %}
|
||||||
|
{% if node_type not in node_types.keys() %}
|
||||||
|
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
|
||||||
|
{% else %}
|
||||||
|
{% if hostname not in node_types[node_type] %}
|
||||||
|
{% do node_types[node_type].update({hostname: ip[0]}) %}
|
||||||
|
{% else %}
|
||||||
|
{% do node_types[node_type][hostname].update(ip[0]) %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
{% endfor %}
|
||||||
|
|
||||||
|
|
||||||
|
hypervisor:
|
||||||
|
nodes:
|
||||||
|
{% for node_type, values in node_types.items() %}
|
||||||
|
{{node_type}}:
|
||||||
|
{% for hostname, ip in values.items() %}
|
||||||
|
{{hostname}}:
|
||||||
|
ip: {{ip}}
|
||||||
|
{% endfor %}
|
||||||
|
{% endfor %}
|
||||||
@@ -18,6 +18,7 @@ base:
|
|||||||
- telegraf.adv_telegraf
|
- telegraf.adv_telegraf
|
||||||
- versionlock.soc_versionlock
|
- versionlock.soc_versionlock
|
||||||
- versionlock.adv_versionlock
|
- versionlock.adv_versionlock
|
||||||
|
- soc.license
|
||||||
|
|
||||||
'* and not *_desktop':
|
'* and not *_desktop':
|
||||||
- firewall.soc_firewall
|
- firewall.soc_firewall
|
||||||
@@ -25,7 +26,12 @@ base:
|
|||||||
- nginx.soc_nginx
|
- nginx.soc_nginx
|
||||||
- nginx.adv_nginx
|
- nginx.adv_nginx
|
||||||
|
|
||||||
'*_manager or *_managersearch':
|
'salt-cloud:driver:libvirt':
|
||||||
|
- match: grain
|
||||||
|
- vm.soc_vm
|
||||||
|
- vm.adv_vm
|
||||||
|
|
||||||
|
'*_manager or *_managersearch or *_managerhype':
|
||||||
- match: compound
|
- match: compound
|
||||||
- node_data.ips
|
- node_data.ips
|
||||||
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
|
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
|
||||||
@@ -44,7 +50,6 @@ base:
|
|||||||
- logstash.adv_logstash
|
- logstash.adv_logstash
|
||||||
- soc.soc_soc
|
- soc.soc_soc
|
||||||
- soc.adv_soc
|
- soc.adv_soc
|
||||||
- soc.license
|
|
||||||
- kibana.soc_kibana
|
- kibana.soc_kibana
|
||||||
- kibana.adv_kibana
|
- kibana.adv_kibana
|
||||||
- kratos.soc_kratos
|
- kratos.soc_kratos
|
||||||
@@ -70,6 +75,9 @@ base:
|
|||||||
- kafka.nodes
|
- kafka.nodes
|
||||||
- kafka.soc_kafka
|
- kafka.soc_kafka
|
||||||
- kafka.adv_kafka
|
- kafka.adv_kafka
|
||||||
|
- hypervisor.nodes
|
||||||
|
- hypervisor.soc_hypervisor
|
||||||
|
- hypervisor.adv_hypervisor
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
|
|
||||||
'*_sensor':
|
'*_sensor':
|
||||||
@@ -87,7 +95,6 @@ base:
|
|||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
- soc.license
|
|
||||||
|
|
||||||
'*_eval':
|
'*_eval':
|
||||||
- node_data.ips
|
- node_data.ips
|
||||||
@@ -114,7 +121,6 @@ base:
|
|||||||
- idstools.adv_idstools
|
- idstools.adv_idstools
|
||||||
- soc.soc_soc
|
- soc.soc_soc
|
||||||
- soc.adv_soc
|
- soc.adv_soc
|
||||||
- soc.license
|
|
||||||
- kibana.soc_kibana
|
- kibana.soc_kibana
|
||||||
- kibana.adv_kibana
|
- kibana.adv_kibana
|
||||||
- strelka.soc_strelka
|
- strelka.soc_strelka
|
||||||
@@ -174,7 +180,6 @@ base:
|
|||||||
- manager.adv_manager
|
- manager.adv_manager
|
||||||
- soc.soc_soc
|
- soc.soc_soc
|
||||||
- soc.adv_soc
|
- soc.adv_soc
|
||||||
- soc.license
|
|
||||||
- kibana.soc_kibana
|
- kibana.soc_kibana
|
||||||
- kibana.adv_kibana
|
- kibana.adv_kibana
|
||||||
- strelka.soc_strelka
|
- strelka.soc_strelka
|
||||||
@@ -240,7 +245,6 @@ base:
|
|||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
- soc.license
|
|
||||||
- kafka.nodes
|
- kafka.nodes
|
||||||
- kafka.soc_kafka
|
- kafka.soc_kafka
|
||||||
- kafka.adv_kafka
|
- kafka.adv_kafka
|
||||||
@@ -258,8 +262,9 @@ base:
|
|||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- kafka.nodes
|
- kafka.nodes
|
||||||
- kafka.soc_kafka
|
- kafka.soc_kafka
|
||||||
- kafka.adv_kafka
|
- stig.soc_stig
|
||||||
- soc.license
|
- elasticfleet.soc_elasticfleet
|
||||||
|
- elasticfleet.adv_elasticfleet
|
||||||
|
|
||||||
'*_import':
|
'*_import':
|
||||||
- node_data.ips
|
- node_data.ips
|
||||||
@@ -283,7 +288,6 @@ base:
|
|||||||
- manager.adv_manager
|
- manager.adv_manager
|
||||||
- soc.soc_soc
|
- soc.soc_soc
|
||||||
- soc.adv_soc
|
- soc.adv_soc
|
||||||
- soc.license
|
|
||||||
- kibana.soc_kibana
|
- kibana.soc_kibana
|
||||||
- kibana.adv_kibana
|
- kibana.adv_kibana
|
||||||
- backup.soc_backup
|
- backup.soc_backup
|
||||||
@@ -318,9 +322,15 @@ base:
|
|||||||
- elasticfleet.adv_elasticfleet
|
- elasticfleet.adv_elasticfleet
|
||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
|
- stig.soc_stig
|
||||||
|
|
||||||
|
'*_hypervisor':
|
||||||
|
- minions.{{ grains.id }}
|
||||||
|
- minions.adv_{{ grains.id }}
|
||||||
|
- stig.soc_stig
|
||||||
|
|
||||||
'*_desktop':
|
'*_desktop':
|
||||||
- minions.{{ grains.id }}
|
- minions.{{ grains.id }}
|
||||||
- minions.adv_{{ grains.id }}
|
- minions.adv_{{ grains.id }}
|
||||||
- stig.soc_stig
|
- stig.soc_stig
|
||||||
- soc.license
|
|
||||||
|
|||||||
91
salt/_modules/hypervisor.py
Normal file
91
salt/_modules/hypervisor.py
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
#!/opt/saltstack/salt/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
"""
|
||||||
|
Salt execution module for hypervisor operations.
|
||||||
|
|
||||||
|
This module provides functions for managing hypervisor configurations,
|
||||||
|
including VM file management.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
__virtualname__ = 'hypervisor'
|
||||||
|
|
||||||
|
|
||||||
|
def __virtual__():
|
||||||
|
"""
|
||||||
|
Only load this module if we're on a system that can manage hypervisors.
|
||||||
|
"""
|
||||||
|
return __virtualname__
|
||||||
|
|
||||||
|
|
||||||
|
def remove_vm_from_vms_file(vms_file_path, vm_hostname, vm_role):
|
||||||
|
"""
|
||||||
|
Remove a VM entry from the hypervisorVMs file.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
vms_file_path (str): Path to the hypervisorVMs file
|
||||||
|
vm_hostname (str): Hostname of the VM to remove (without role suffix)
|
||||||
|
vm_role (str): Role of the VM
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
dict: Result dictionary with success status and message
|
||||||
|
|
||||||
|
CLI Example:
|
||||||
|
salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
# Check if file exists
|
||||||
|
if not os.path.exists(vms_file_path):
|
||||||
|
msg = f"VMs file not found: {vms_file_path}"
|
||||||
|
log.error(msg)
|
||||||
|
return {'result': False, 'comment': msg}
|
||||||
|
|
||||||
|
# Read current VMs
|
||||||
|
with open(vms_file_path, 'r') as f:
|
||||||
|
content = f.read().strip()
|
||||||
|
vms = json.loads(content) if content else []
|
||||||
|
|
||||||
|
# Find and remove the VM entry
|
||||||
|
original_count = len(vms)
|
||||||
|
vms = [vm for vm in vms if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]
|
||||||
|
|
||||||
|
if len(vms) < original_count:
|
||||||
|
# VM was found and removed, write back to file
|
||||||
|
with open(vms_file_path, 'w') as f:
|
||||||
|
json.dump(vms, f, indent=2)
|
||||||
|
|
||||||
|
# Set socore:socore ownership (939:939)
|
||||||
|
os.chown(vms_file_path, 939, 939)
|
||||||
|
|
||||||
|
msg = f"Removed VM {vm_hostname}_{vm_role} from {vms_file_path}"
|
||||||
|
log.info(msg)
|
||||||
|
return {'result': True, 'comment': msg}
|
||||||
|
else:
|
||||||
|
msg = f"VM {vm_hostname}_{vm_role} not found in {vms_file_path}"
|
||||||
|
log.warning(msg)
|
||||||
|
return {'result': False, 'comment': msg}
|
||||||
|
|
||||||
|
except json.JSONDecodeError as e:
|
||||||
|
msg = f"Failed to parse JSON in {vms_file_path}: {str(e)}"
|
||||||
|
log.error(msg)
|
||||||
|
return {'result': False, 'comment': msg}
|
||||||
|
except Exception as e:
|
||||||
|
msg = f"Failed to remove VM {vm_hostname}_{vm_role} from {vms_file_path}: {str(e)}"
|
||||||
|
log.error(msg)
|
||||||
|
return {'result': False, 'comment': msg}
|
||||||
335
salt/_modules/qcow2.py
Normal file
335
salt/_modules/qcow2.py
Normal file
@@ -0,0 +1,335 @@
|
|||||||
|
#!py
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
"""
|
||||||
|
Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
|
||||||
|
for modifying network configurations within QCOW2 images, adjusting virtual machine hardware settings, and
|
||||||
|
creating virtual storage volumes. It serves as a Salt interface to the so-qcow2-modify-network,
|
||||||
|
so-kvm-modify-hardware, and so-kvm-create-volume scripts.
|
||||||
|
|
||||||
|
The module offers three main capabilities:
|
||||||
|
1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
|
||||||
|
2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
|
||||||
|
3. Volume Management: Create and attach virtual storage volumes for NSM data
|
||||||
|
|
||||||
|
This module is intended to work with Security Onion's virtualization infrastructure and is typically
|
||||||
|
used in conjunction with salt-cloud for VM provisioning and management.
|
||||||
|
"""
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import subprocess
|
||||||
|
import shlex
|
||||||
|
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
__virtualname__ = 'qcow2'
|
||||||
|
|
||||||
|
def __virtual__():
|
||||||
|
return __virtualname__
|
||||||
|
|
||||||
|
def modify_network_config(image, interface, mode, vm_name, ip4=None, gw4=None, dns4=None, search4=None):
|
||||||
|
'''
|
||||||
|
Usage:
|
||||||
|
salt '*' qcow2.modify_network_config image=<path> interface=<iface> mode=<mode> vm_name=<name> [ip4=<addr>] [gw4=<addr>] [dns4=<servers>] [search4=<domain>]
|
||||||
|
|
||||||
|
Options:
|
||||||
|
image
|
||||||
|
Path to the QCOW2 image file that will be modified
|
||||||
|
interface
|
||||||
|
Network interface name to configure (e.g., 'enp1s0')
|
||||||
|
mode
|
||||||
|
Network configuration mode, either 'dhcp4' or 'static4'
|
||||||
|
vm_name
|
||||||
|
Full name of the VM (hostname_role)
|
||||||
|
ip4
|
||||||
|
IPv4 address with CIDR notation (e.g., '192.168.1.10/24')
|
||||||
|
Required when mode='static4'
|
||||||
|
gw4
|
||||||
|
IPv4 gateway address (e.g., '192.168.1.1')
|
||||||
|
Required when mode='static4'
|
||||||
|
dns4
|
||||||
|
Comma-separated list of IPv4 DNS servers (e.g., '8.8.8.8,8.8.4.4')
|
||||||
|
Optional for both DHCP and static configurations
|
||||||
|
search4
|
||||||
|
DNS search domain for IPv4 (e.g., 'example.local')
|
||||||
|
Optional for both DHCP and static configurations
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
1. **Configure DHCP:**
|
||||||
|
```bash
|
||||||
|
salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='dhcp4'
|
||||||
|
```
|
||||||
|
This configures enp1s0 to use DHCP for IP assignment
|
||||||
|
|
||||||
|
2. **Configure Static IP:**
|
||||||
|
```bash
|
||||||
|
salt '*' qcow2.modify_network_config image='/nsm/libvirt/images/sool9/sool9.qcow2' interface='enp1s0' mode='static4' ip4='192.168.1.10/24' gw4='192.168.1.1' dns4='192.168.1.1,8.8.8.8' search4='example.local'
|
||||||
|
```
|
||||||
|
This sets a static IP configuration with DNS servers and search domain
|
||||||
|
|
||||||
|
Notes:
|
||||||
|
- The QCOW2 image must be accessible and writable by the salt minion
|
||||||
|
- The image should not be in use by a running VM when modified
|
||||||
|
- Network changes take effect on next VM boot
|
||||||
|
- Requires so-qcow2-modify-network script to be installed
|
||||||
|
|
||||||
|
Description:
|
||||||
|
This function modifies network configuration within a QCOW2 image file by executing
|
||||||
|
the so-qcow2-modify-network script. It supports both DHCP and static IPv4 configuration.
|
||||||
|
The script mounts the image, modifies the network configuration files, and unmounts
|
||||||
|
safely. All operations are logged for troubleshooting purposes.
|
||||||
|
|
||||||
|
Exit Codes:
|
||||||
|
0: Success
|
||||||
|
1: Invalid parameters or configuration
|
||||||
|
2: Image access or mounting error
|
||||||
|
3: Network configuration error
|
||||||
|
4: System command error
|
||||||
|
255: Unexpected error
|
||||||
|
|
||||||
|
Logging:
|
||||||
|
- All operations are logged to the salt minion log
|
||||||
|
- Log entries are prefixed with 'qcow2 module:'
|
||||||
|
- Error conditions include detailed error messages and stack traces
|
||||||
|
- Success/failure status is logged for verification
|
||||||
|
'''
|
||||||
|
|
||||||
|
cmd = ['/usr/sbin/so-qcow2-modify-network', '-I', image, '-i', interface, '-n', vm_name]
|
||||||
|
|
||||||
|
if mode.lower() == 'dhcp4':
|
||||||
|
cmd.append('--dhcp4')
|
||||||
|
elif mode.lower() == 'static4':
|
||||||
|
cmd.append('--static4')
|
||||||
|
if not ip4 or not gw4:
|
||||||
|
raise ValueError('Both ip4 and gw4 are required for static configuration.')
|
||||||
|
cmd.extend(['--ip4', ip4, '--gw4', gw4])
|
||||||
|
if dns4:
|
||||||
|
cmd.extend(['--dns4', dns4])
|
||||||
|
if search4:
|
||||||
|
cmd.extend(['--search4', search4])
|
||||||
|
else:
|
||||||
|
raise ValueError("Invalid mode '{}'. Expected 'dhcp4' or 'static4'.".format(mode))
|
||||||
|
|
||||||
|
log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
|
||||||
|
|
||||||
|
try:
|
||||||
|
result = subprocess.run(cmd, capture_output=True, text=True, check=False)
|
||||||
|
ret = {
|
||||||
|
'retcode': result.returncode,
|
||||||
|
'stdout': result.stdout,
|
||||||
|
'stderr': result.stderr
|
||||||
|
}
|
||||||
|
if result.returncode != 0:
|
||||||
|
log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
|
||||||
|
else:
|
||||||
|
log.info('qcow2 module: Script executed successfully.')
|
||||||
|
return ret
|
||||||
|
except Exception as e:
|
||||||
|
log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
|
||||||
|
raise
|
||||||
|
|
||||||
|
def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False):
    '''
    Adjust the hardware configuration of a KVM virtual machine by invoking
    the so-kvm-modify-hardware helper script.

    Usage:
        salt '*' qcow2.modify_hardware_config vm_name=<name> [cpu=<count>] [memory=<size>] [pci=<id>] [start=<bool>]

    Options:
        vm_name: Name of the virtual machine to modify.
        cpu: Number of virtual CPUs to assign (positive integer).
            Optional - VM's current CPU count retained if not specified.
        memory: Amount of memory to assign in MiB (positive integer).
            Optional - VM's current memory size retained if not specified.
        pci: PCI hardware ID(s) to passthrough (e.g. '0000:c7:00.0').
            Accepts a single ID, a comma-separated string, or a list.
            Optional - no PCI passthrough if not specified.
        start: Start the VM after modification. Optional - defaults to False.

    Returns:
        dict with 'retcode', 'stdout' and 'stderr' from the helper script.

    Raises:
        ValueError: when cpu or memory is not a positive integer.

    Notes:
        - Requires the so-kvm-modify-hardware script to be installed.
        - All operations are logged with the 'qcow2 module:' prefix.
    '''

    cmd = ['/usr/sbin/so-kvm-modify-hardware', '-v', vm_name]

    if cpu is not None:
        if not (isinstance(cpu, int) and cpu > 0):
            raise ValueError('cpu must be a positive integer.')
        cmd.extend(['-c', str(cpu)])

    if memory is not None:
        if not (isinstance(memory, int) and memory > 0):
            raise ValueError('memory must be a positive integer.')
        cmd.extend(['-m', str(memory)])

    if pci:
        # Normalize to a list: a comma-separated string becomes several IDs,
        # a list is used as-is, anything else is wrapped as a single device.
        if isinstance(pci, str):
            devices = [dev.strip() for dev in pci.split(',') if dev.strip()]
        elif isinstance(pci, list):
            devices = pci
        else:
            devices = [pci]

        # Each passthrough device gets its own -p flag.
        for dev in devices:
            cmd.extend(['-p', str(dev)])

    if start:
        cmd.append('-s')

    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))

    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, check=False)
    except Exception as err:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(err))
        raise

    if proc.returncode != 0:
        log.error('qcow2 module: Script execution failed with return code {}: {}'.format(proc.returncode, proc.stderr))
    else:
        log.info('qcow2 module: Script executed successfully.')
    return {
        'retcode': proc.returncode,
        'stdout': proc.stdout,
        'stderr': proc.stderr
    }
|
||||||
|
|
||||||
|
def create_volume_config(vm_name, size_gb, start=False):
    '''
    Create and attach a virtual storage volume to a KVM virtual machine by
    invoking the so-kvm-create-volume helper script. The volume is intended
    for NSM storage and is an alternative to physical disk passthrough.

    Usage:
        salt '*' qcow2.create_volume_config vm_name=<name> size_gb=<size> [start=<bool>]

    Options:
        vm_name: Name of the virtual machine to attach the volume to.
        size_gb: Volume size in GB (positive integer).
        start: Start the VM after volume creation. Optional - defaults to False.

    Returns:
        dict with 'retcode', 'stdout' and 'stderr' from the helper script.

    Raises:
        ValueError: when size_gb is not a positive integer.

    Notes:
        - Requires the so-kvm-create-volume script to be installed.
        - All operations are logged with the 'qcow2 module:' prefix.
    '''

    # Reject anything that is not a positive integer before shelling out.
    if not isinstance(size_gb, int) or size_gb <= 0:
        raise ValueError('size_gb must be a positive integer.')

    cmd = ['/usr/sbin/so-kvm-create-volume', '-v', vm_name, '-s', str(size_gb)]
    if start:
        cmd.append('-S')

    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))

    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, check=False)
    except Exception as err:
        log.error('qcow2 module: An error occurred while executing the script: {}'.format(err))
        raise

    if proc.returncode != 0:
        log.error('qcow2 module: Script execution failed with return code {}: {}'.format(proc.returncode, proc.stderr))
    else:
        log.info('qcow2 module: Script executed successfully.')
    return {
        'retcode': proc.returncode,
        'stdout': proc.stdout,
        'stderr': proc.stderr
    }
|
||||||
1171
salt/_runners/setup_hypervisor.py
Normal file
1171
salt/_runners/setup_hypervisor.py
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,264 +1,180 @@
|
|||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
# Elastic License 2.0.
|
Elastic License 2.0. #}
|
||||||
|
|
||||||
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
|
{% set ISAIRGAP = salt['pillar.get']('global:airgap', False) %}
|
||||||
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
|
{% import_yaml 'salt/minion.defaults.yaml' as saltversion %}
|
||||||
{% set saltversion = saltversion.salt.minion.version %}
|
{% set saltversion = saltversion.salt.minion.version %}
|
||||||
|
|
||||||
{# this is the list we are returning from this map file, it gets built below #}
|
{# Define common state groups to reduce redundancy #}
|
||||||
{% set allowed_states= [] %}
|
{% set base_states = [
|
||||||
|
'common',
|
||||||
|
'patch.os.schedule',
|
||||||
|
'motd',
|
||||||
|
'salt.minion-check',
|
||||||
|
'sensoroni',
|
||||||
|
'salt.lasthighstate',
|
||||||
|
'salt.minion'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set ssl_states = [
|
||||||
|
'ssl',
|
||||||
|
'telegraf',
|
||||||
|
'firewall',
|
||||||
|
'schedule',
|
||||||
|
'docker_clean'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set manager_states = [
|
||||||
|
'salt.master',
|
||||||
|
'ca',
|
||||||
|
'registry',
|
||||||
|
'manager',
|
||||||
|
'nginx',
|
||||||
|
'influxdb',
|
||||||
|
'soc',
|
||||||
|
'kratos',
|
||||||
|
'hydra',
|
||||||
|
'elasticfleet',
|
||||||
|
'elastic-fleet-package-registry',
|
||||||
|
'idstools',
|
||||||
|
'suricata.manager',
|
||||||
|
'utility'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set sensor_states = [
|
||||||
|
'pcap',
|
||||||
|
'suricata',
|
||||||
|
'healthcheck',
|
||||||
|
'tcpreplay',
|
||||||
|
'zeek',
|
||||||
|
'strelka'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set kafka_states = [
|
||||||
|
'kafka'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set stig_states = [
|
||||||
|
'stig'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{% set elastic_stack_states = [
|
||||||
|
'elasticsearch',
|
||||||
|
'elasticsearch.auth',
|
||||||
|
'kibana',
|
||||||
|
'kibana.secrets',
|
||||||
|
'elastalert',
|
||||||
|
'logstash',
|
||||||
|
'redis'
|
||||||
|
] %}
|
||||||
|
|
||||||
|
{# Initialize the allowed_states list #}
|
||||||
|
{% set allowed_states = [] %}
|
||||||
|
|
||||||
{% if grains.saltversion | string == saltversion | string %}
|
{% if grains.saltversion | string == saltversion | string %}
|
||||||
|
{# Map role-specific states #}
|
||||||
|
{% set role_states = {
|
||||||
|
'so-eval': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
sensor_states +
|
||||||
|
elastic_stack_states | reject('equalto', 'logstash') | list
|
||||||
|
),
|
||||||
|
'so-heavynode': (
|
||||||
|
ssl_states +
|
||||||
|
sensor_states +
|
||||||
|
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
|
||||||
|
),
|
||||||
|
'so-idh': (
|
||||||
|
ssl_states +
|
||||||
|
['idh']
|
||||||
|
),
|
||||||
|
'so-import': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
|
||||||
|
['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
|
||||||
|
),
|
||||||
|
'so-manager': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
|
||||||
|
stig_states +
|
||||||
|
kafka_states +
|
||||||
|
elastic_stack_states
|
||||||
|
),
|
||||||
|
'so-managerhype': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
|
||||||
|
stig_states +
|
||||||
|
kafka_states +
|
||||||
|
elastic_stack_states
|
||||||
|
),
|
||||||
|
'so-managersearch': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
|
||||||
|
stig_states +
|
||||||
|
kafka_states +
|
||||||
|
elastic_stack_states
|
||||||
|
),
|
||||||
|
'so-searchnode': (
|
||||||
|
ssl_states +
|
||||||
|
['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
|
||||||
|
stig_states
|
||||||
|
),
|
||||||
|
'so-standalone': (
|
||||||
|
ssl_states +
|
||||||
|
manager_states +
|
||||||
|
['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
|
||||||
|
sensor_states +
|
||||||
|
stig_states +
|
||||||
|
kafka_states +
|
||||||
|
elastic_stack_states
|
||||||
|
),
|
||||||
|
'so-sensor': (
|
||||||
|
ssl_states +
|
||||||
|
sensor_states +
|
||||||
|
['nginx'] +
|
||||||
|
stig_states
|
||||||
|
),
|
||||||
|
'so-fleet': (
|
||||||
|
ssl_states +
|
||||||
|
stig_states +
|
||||||
|
['logstash', 'nginx', 'healthcheck', 'elasticfleet']
|
||||||
|
),
|
||||||
|
'so-receiver': (
|
||||||
|
ssl_states +
|
||||||
|
kafka_states +
|
||||||
|
stig_states +
|
||||||
|
['logstash', 'redis']
|
||||||
|
),
|
||||||
|
'so-hypervisor': (
|
||||||
|
ssl_states +
|
||||||
|
stig_states +
|
||||||
|
['hypervisor', 'libvirt']
|
||||||
|
),
|
||||||
|
'so-desktop': (
|
||||||
|
['ssl', 'docker_clean', 'telegraf'] +
|
||||||
|
stig_states
|
||||||
|
)
|
||||||
|
} %}
|
||||||
|
|
||||||
{% set allowed_states= salt['grains.filter_by']({
|
{# Get states for the current role #}
|
||||||
'so-eval': [
|
{% if grains.role in role_states %}
|
||||||
'salt.master',
|
{% set allowed_states = role_states[grains.role] %}
|
||||||
'ca',
|
{% endif %}
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'manager',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'influxdb',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'elasticfleet',
|
|
||||||
'elastic-fleet-package-registry',
|
|
||||||
'firewall',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'healthcheck',
|
|
||||||
'pcap',
|
|
||||||
'suricata',
|
|
||||||
'utility',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean'
|
|
||||||
],
|
|
||||||
'so-heavynode': [
|
|
||||||
'ssl',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'pcap',
|
|
||||||
'suricata',
|
|
||||||
'healthcheck',
|
|
||||||
'elasticagent',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean'
|
|
||||||
],
|
|
||||||
'so-idh': [
|
|
||||||
'ssl',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'idh',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean'
|
|
||||||
],
|
|
||||||
'so-import': [
|
|
||||||
'salt.master',
|
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'manager',
|
|
||||||
'nginx',
|
|
||||||
'strelka.manager',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'influxdb',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'pcap',
|
|
||||||
'utility',
|
|
||||||
'suricata',
|
|
||||||
'zeek',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean',
|
|
||||||
'elasticfleet',
|
|
||||||
'elastic-fleet-package-registry'
|
|
||||||
],
|
|
||||||
'so-manager': [
|
|
||||||
'salt.master',
|
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'manager',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'influxdb',
|
|
||||||
'strelka.manager',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'elasticfleet',
|
|
||||||
'elastic-fleet-package-registry',
|
|
||||||
'firewall',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'utility',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean',
|
|
||||||
'stig',
|
|
||||||
'kafka'
|
|
||||||
],
|
|
||||||
'so-managersearch': [
|
|
||||||
'salt.master',
|
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'influxdb',
|
|
||||||
'strelka.manager',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'elastic-fleet-package-registry',
|
|
||||||
'elasticfleet',
|
|
||||||
'firewall',
|
|
||||||
'manager',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'utility',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean',
|
|
||||||
'stig',
|
|
||||||
'kafka'
|
|
||||||
],
|
|
||||||
'so-searchnode': [
|
|
||||||
'ssl',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean',
|
|
||||||
'stig',
|
|
||||||
'kafka.ca',
|
|
||||||
'kafka.ssl'
|
|
||||||
],
|
|
||||||
'so-standalone': [
|
|
||||||
'salt.master',
|
|
||||||
'ca',
|
|
||||||
'ssl',
|
|
||||||
'registry',
|
|
||||||
'manager',
|
|
||||||
'nginx',
|
|
||||||
'telegraf',
|
|
||||||
'influxdb',
|
|
||||||
'soc',
|
|
||||||
'kratos',
|
|
||||||
'hydra',
|
|
||||||
'elastic-fleet-package-registry',
|
|
||||||
'elasticfleet',
|
|
||||||
'firewall',
|
|
||||||
'idstools',
|
|
||||||
'suricata.manager',
|
|
||||||
'pcap',
|
|
||||||
'suricata',
|
|
||||||
'healthcheck',
|
|
||||||
'utility',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean',
|
|
||||||
'stig',
|
|
||||||
'kafka'
|
|
||||||
],
|
|
||||||
'so-sensor': [
|
|
||||||
'ssl',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'nginx',
|
|
||||||
'pcap',
|
|
||||||
'suricata',
|
|
||||||
'healthcheck',
|
|
||||||
'schedule',
|
|
||||||
'tcpreplay',
|
|
||||||
'docker_clean',
|
|
||||||
'stig'
|
|
||||||
],
|
|
||||||
'so-fleet': [
|
|
||||||
'ssl',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'logstash',
|
|
||||||
'nginx',
|
|
||||||
'healthcheck',
|
|
||||||
'schedule',
|
|
||||||
'elasticfleet',
|
|
||||||
'docker_clean'
|
|
||||||
],
|
|
||||||
'so-receiver': [
|
|
||||||
'ssl',
|
|
||||||
'telegraf',
|
|
||||||
'firewall',
|
|
||||||
'schedule',
|
|
||||||
'docker_clean',
|
|
||||||
'kafka',
|
|
||||||
'stig'
|
|
||||||
],
|
|
||||||
'so-desktop': [
|
|
||||||
'ssl',
|
|
||||||
'docker_clean',
|
|
||||||
'telegraf',
|
|
||||||
'stig'
|
|
||||||
],
|
|
||||||
}, grain='role') %}
|
|
||||||
|
|
||||||
{%- if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
|
|
||||||
{% do allowed_states.append('zeek') %}
|
|
||||||
{%- endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-sensor', 'so-eval', 'so-standalone', 'so-heavynode'] %}
|
|
||||||
{% do allowed_states.append('strelka') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-import'] %}
|
|
||||||
{% do allowed_states.append('elasticsearch') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
|
|
||||||
{% do allowed_states.append('elasticsearch.auth') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
|
|
||||||
{% do allowed_states.append('kibana') %}
|
|
||||||
{% do allowed_states.append('kibana.secrets') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %}
|
|
||||||
{% do allowed_states.append('elastalert') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-manager', 'so-standalone', 'so-searchnode', 'so-managersearch', 'so-heavynode', 'so-receiver'] %}
|
|
||||||
{% do allowed_states.append('logstash') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{% if grains.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-receiver', 'so-eval'] %}
|
|
||||||
{% do allowed_states.append('redis') %}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
{# all nodes on the right salt version can run the following states #}
|
|
||||||
{% do allowed_states.append('common') %}
|
|
||||||
{% do allowed_states.append('patch.os.schedule') %}
|
|
||||||
{% do allowed_states.append('motd') %}
|
|
||||||
{% do allowed_states.append('salt.minion-check') %}
|
|
||||||
{% do allowed_states.append('sensoroni') %}
|
|
||||||
{% do allowed_states.append('salt.lasthighstate') %}
|
|
||||||
|
|
||||||
|
{# Add base states that apply to all roles #}
|
||||||
|
{% for state in base_states %}
|
||||||
|
{% do allowed_states.append(state) %}
|
||||||
|
{% endfor %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
|
{# Add airgap state if needed #}
|
||||||
{% if ISAIRGAP %}
|
{% if ISAIRGAP %}
|
||||||
{% do allowed_states.append('airgap') %}
|
{% do allowed_states.append('airgap') %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{# all nodes can always run salt.minion state #}
|
|
||||||
{% do allowed_states.append('salt.minion') %}
|
|
||||||
|
|||||||
@@ -11,6 +11,10 @@ TODAY=$(date '+%Y_%m_%d')
|
|||||||
BACKUPDIR={{ DESTINATION }}
|
BACKUPDIR={{ DESTINATION }}
|
||||||
BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar"
|
BACKUPFILE="$BACKUPDIR/so-config-backup-$TODAY.tar"
|
||||||
MAXBACKUPS=7
|
MAXBACKUPS=7
|
||||||
|
EXCLUSIONS=(
|
||||||
|
"--exclude=/opt/so/saltstack/local/salt/elasticfleet/files/so_agent-installers"
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
# Create backup dir if it does not exist
|
# Create backup dir if it does not exist
|
||||||
mkdir -p /nsm/backup
|
mkdir -p /nsm/backup
|
||||||
@@ -23,7 +27,7 @@ if [ ! -f $BACKUPFILE ]; then
|
|||||||
|
|
||||||
# Loop through all paths defined in global.sls, and append them to backup file
|
# Loop through all paths defined in global.sls, and append them to backup file
|
||||||
{%- for LOCATION in BACKUPLOCATIONS %}
|
{%- for LOCATION in BACKUPLOCATIONS %}
|
||||||
tar -rf $BACKUPFILE {{ LOCATION }}
|
tar -rf $BACKUPFILE "${EXCLUSIONS[@]}" {{ LOCATION }}
|
||||||
{%- endfor %}
|
{%- endfor %}
|
||||||
|
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -1,4 +1,7 @@
|
|||||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
{% set PCAP_BPF_STATUS = 0 %}
|
||||||
|
{% set STENO_BPF_COMPILED = "" %}
|
||||||
|
|
||||||
{% if GLOBALS.pcap_engine == "TRANSITION" %}
|
{% if GLOBALS.pcap_engine == "TRANSITION" %}
|
||||||
{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
|
{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
|
||||||
{% else %}
|
{% else %}
|
||||||
@@ -8,3 +11,11 @@
|
|||||||
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
|
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
|
||||||
{% set PCAPBPF = BPFMERGED.pcap %}
|
{% set PCAPBPF = BPFMERGED.pcap %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
|
{% if PCAPBPF %}
|
||||||
|
{% set PCAP_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ PCAPBPF|join(" "), cwd='/root') %}
|
||||||
|
{% if PCAP_BPF_CALC['retcode'] == 0 %}
|
||||||
|
{% set PCAP_BPF_STATUS = 1 %}
|
||||||
|
{% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
|||||||
@@ -1,11 +1,11 @@
|
|||||||
bpf:
|
bpf:
|
||||||
pcap:
|
pcap:
|
||||||
description: List of BPF filters to apply to Stenographer.
|
description: List of BPF filters to apply to the PCAP engine.
|
||||||
multiline: True
|
multiline: True
|
||||||
forcedType: "[]string"
|
forcedType: "[]string"
|
||||||
helpLink: bpf.html
|
helpLink: bpf.html
|
||||||
suricata:
|
suricata:
|
||||||
description: List of BPF filters to apply to Suricata.
|
description: List of BPF filters to apply to Suricata. This will apply to alerts and, if enabled, to metadata and PCAP logs generated by Suricata.
|
||||||
multiline: True
|
multiline: True
|
||||||
forcedType: "[]string"
|
forcedType: "[]string"
|
||||||
helpLink: bpf.html
|
helpLink: bpf.html
|
||||||
|
|||||||
@@ -1,7 +1,16 @@
|
|||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
|
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
|
||||||
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
|
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
|
||||||
|
{% set SURICATA_BPF_STATUS = 0 %}
|
||||||
{% import 'bpf/macros.jinja' as MACROS %}
|
{% import 'bpf/macros.jinja' as MACROS %}
|
||||||
|
|
||||||
{{ MACROS.remove_comments(BPFMERGED, 'suricata') }}
|
{{ MACROS.remove_comments(BPFMERGED, 'suricata') }}
|
||||||
|
|
||||||
{% set SURICATABPF = BPFMERGED.suricata %}
|
{% set SURICATABPF = BPFMERGED.suricata %}
|
||||||
|
|
||||||
|
{% if SURICATABPF %}
|
||||||
|
{% set SURICATA_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ SURICATABPF|join(" "), cwd='/root') %}
|
||||||
|
{% if SURICATA_BPF_CALC['retcode'] == 0 %}
|
||||||
|
{% set SURICATA_BPF_STATUS = 1 %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
|||||||
@@ -1,7 +1,16 @@
|
|||||||
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
|
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
|
||||||
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
|
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
|
||||||
|
{% set ZEEK_BPF_STATUS = 0 %}
|
||||||
{% import 'bpf/macros.jinja' as MACROS %}
|
{% import 'bpf/macros.jinja' as MACROS %}
|
||||||
|
|
||||||
{{ MACROS.remove_comments(BPFMERGED, 'zeek') }}
|
{{ MACROS.remove_comments(BPFMERGED, 'zeek') }}
|
||||||
|
|
||||||
{% set ZEEKBPF = BPFMERGED.zeek %}
|
{% set ZEEKBPF = BPFMERGED.zeek %}
|
||||||
|
|
||||||
|
{% if ZEEKBPF %}
|
||||||
|
{% set ZEEK_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ ZEEKBPF|join(" "), cwd='/root') %}
|
||||||
|
{% if ZEEK_BPF_CALC['retcode'] == 0 %}
|
||||||
|
{% set ZEEK_BPF_STATUS = 1 %}
|
||||||
|
{% endif %}
|
||||||
|
{% endif %}
|
||||||
|
|||||||
21
salt/common/grains.sls
Normal file
21
salt/common/grains.sls
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
{% set nsm_exists = salt['file.directory_exists']('/nsm') %}
|
||||||
|
{% if nsm_exists %}
|
||||||
|
{% set nsm_total = salt['cmd.shell']('df -BG /nsm | tail -1 | awk \'{print $2}\'') %}
|
||||||
|
|
||||||
|
nsm_total:
|
||||||
|
grains.present:
|
||||||
|
- name: nsm_total
|
||||||
|
- value: {{ nsm_total }}
|
||||||
|
|
||||||
|
{% else %}
|
||||||
|
|
||||||
|
nsm_missing:
|
||||||
|
test.succeed_without_changes:
|
||||||
|
- name: /nsm does not exist, skipping grain assignment
|
||||||
|
|
||||||
|
{% endif %}
|
||||||
@@ -4,6 +4,7 @@
|
|||||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
|
|
||||||
include:
|
include:
|
||||||
|
- common.grains
|
||||||
- common.packages
|
- common.packages
|
||||||
{% if GLOBALS.role in GLOBALS.manager_roles %}
|
{% if GLOBALS.role in GLOBALS.manager_roles %}
|
||||||
- manager.elasticsearch # needed for elastic_curl_config state
|
- manager.elasticsearch # needed for elastic_curl_config state
|
||||||
@@ -106,7 +107,7 @@ Etc/UTC:
|
|||||||
timezone.system
|
timezone.system
|
||||||
|
|
||||||
# Sync curl configuration for Elasticsearch authentication
|
# Sync curl configuration for Elasticsearch authentication
|
||||||
{% if GLOBALS.role in ['so-eval', 'so-heavynode', 'so-import', 'so-manager', 'so-managersearch', 'so-searchnode', 'so-standalone'] %}
|
{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-searchnode'] %}
|
||||||
elastic_curl_config:
|
elastic_curl_config:
|
||||||
file.managed:
|
file.managed:
|
||||||
- name: /opt/so/conf/elasticsearch/curl.config
|
- name: /opt/so/conf/elasticsearch/curl.config
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
# we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init
|
||||||
|
# since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined
|
||||||
{% if GLOBALS.os_family == 'Debian' %}
|
{% if grains.os_family == 'Debian' %}
|
||||||
commonpkgs:
|
commonpkgs:
|
||||||
pkg.installed:
|
pkg.installed:
|
||||||
- skip_suggestions: True
|
- skip_suggestions: True
|
||||||
@@ -46,7 +46,7 @@ python-rich:
|
|||||||
{% endif %}
|
{% endif %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if GLOBALS.os_family == 'RedHat' %}
|
{% if grains.os_family == 'RedHat' %}
|
||||||
|
|
||||||
remove_mariadb:
|
remove_mariadb:
|
||||||
pkg.removed:
|
pkg.removed:
|
||||||
|
|||||||
@@ -29,9 +29,26 @@ fi
|
|||||||
|
|
||||||
interface="$1"
|
interface="$1"
|
||||||
shift
|
shift
|
||||||
tcpdump -i $interface -ddd $@ | tail -n+2 |
|
|
||||||
while read line; do
|
# Capture tcpdump output and exit code
|
||||||
|
tcpdump_output=$(tcpdump -i "$interface" -ddd "$@" 2>&1)
|
||||||
|
tcpdump_exit=$?
|
||||||
|
|
||||||
|
if [ $tcpdump_exit -ne 0 ]; then
|
||||||
|
echo "$tcpdump_output" >&2
|
||||||
|
exit $tcpdump_exit
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Process the output, skipping the first line
|
||||||
|
echo "$tcpdump_output" | tail -n+2 | while read -r line; do
|
||||||
cols=( $line )
|
cols=( $line )
|
||||||
printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}
|
printf "%04x%02x%02x%08x" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}"
|
||||||
done
|
done
|
||||||
|
|
||||||
|
# Check if the pipeline succeeded
|
||||||
|
if [ "${PIPESTATUS[0]}" -ne 0 ]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
echo ""
|
echo ""
|
||||||
|
exit 0
|
||||||
|
|||||||
@@ -99,6 +99,17 @@ add_interface_bond0() {
|
|||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
airgap_playbooks() {
|
||||||
|
SRC_DIR=$1
|
||||||
|
# Copy playbooks if using airgap
|
||||||
|
mkdir -p /nsm/airgap-resources
|
||||||
|
# Purge old airgap playbooks to ensure SO only uses the latest released playbooks
|
||||||
|
rm -fr /nsm/airgap-resources/playbooks
|
||||||
|
tar xf $SRC_DIR/airgap-resources/playbooks.tgz -C /nsm/airgap-resources/
|
||||||
|
chown -R socore:socore /nsm/airgap-resources/playbooks
|
||||||
|
git config --global --add safe.directory /nsm/airgap-resources/playbooks
|
||||||
|
}
|
||||||
|
|
||||||
check_container() {
|
check_container() {
|
||||||
docker ps | grep "$1:" > /dev/null 2>&1
|
docker ps | grep "$1:" > /dev/null 2>&1
|
||||||
return $?
|
return $?
|
||||||
@@ -209,12 +220,22 @@ compare_es_versions() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
copy_new_files() {
|
copy_new_files() {
|
||||||
|
# Define files to exclude from deletion (relative to their respective base directories)
|
||||||
|
local EXCLUDE_FILES=(
|
||||||
|
"salt/hypervisor/soc_hypervisor.yaml"
|
||||||
|
)
|
||||||
|
|
||||||
|
# Build rsync exclude arguments
|
||||||
|
local EXCLUDE_ARGS=()
|
||||||
|
for file in "${EXCLUDE_FILES[@]}"; do
|
||||||
|
EXCLUDE_ARGS+=(--exclude="$file")
|
||||||
|
done
|
||||||
|
|
||||||
# Copy new files over to the salt dir
|
# Copy new files over to the salt dir
|
||||||
cd $UPDATE_DIR
|
cd $UPDATE_DIR
|
||||||
rsync -a salt $DEFAULT_SALT_DIR/ --delete
|
rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
|
||||||
rsync -a pillar $DEFAULT_SALT_DIR/ --delete
|
rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
|
||||||
chown -R socore:socore $DEFAULT_SALT_DIR/
|
chown -R socore:socore $DEFAULT_SALT_DIR/
|
||||||
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
|
|
||||||
cd /tmp
|
cd /tmp
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -299,7 +320,8 @@ fail() {
|
|||||||
|
|
||||||
get_agent_count() {
|
get_agent_count() {
|
||||||
if [ -f /opt/so/log/agents/agentstatus.log ]; then
|
if [ -f /opt/so/log/agents/agentstatus.log ]; then
|
||||||
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}')
|
AGENTCOUNT=$(cat /opt/so/log/agents/agentstatus.log | grep -wF active | awk '{print $2}' | sed 's/,//')
|
||||||
|
[[ -z "$AGENTCOUNT" ]] && AGENTCOUNT="0"
|
||||||
else
|
else
|
||||||
AGENTCOUNT=0
|
AGENTCOUNT=0
|
||||||
fi
|
fi
|
||||||
@@ -373,7 +395,7 @@ is_manager_node() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
is_sensor_node() {
|
is_sensor_node() {
|
||||||
# Check to see if this is a sensor (forward) node
|
# Check to see if this is a sensor node
|
||||||
is_single_node_grid && return 0
|
is_single_node_grid && return 0
|
||||||
grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
|
grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
|
||||||
}
|
}
|
||||||
@@ -429,8 +451,7 @@ lookup_grain() {
|
|||||||
|
|
||||||
lookup_role() {
|
lookup_role() {
|
||||||
id=$(lookup_grain id)
|
id=$(lookup_grain id)
|
||||||
pieces=($(echo $id | tr '_' ' '))
|
echo "${id##*_}"
|
||||||
echo ${pieces[1]}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
is_feature_enabled() {
|
is_feature_enabled() {
|
||||||
|
|||||||
@@ -45,7 +45,7 @@ def check_for_fps():
|
|||||||
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
|
result = subprocess.run([feat_full + '-mode-setup', '--is-enabled'], stdout=subprocess.PIPE)
|
||||||
if result.returncode == 0:
|
if result.returncode == 0:
|
||||||
fps = 1
|
fps = 1
|
||||||
except FileNotFoundError:
|
except:
|
||||||
fn = '/proc/sys/crypto/' + feat_full + '_enabled'
|
fn = '/proc/sys/crypto/' + feat_full + '_enabled'
|
||||||
try:
|
try:
|
||||||
with open(fn, 'r') as f:
|
with open(fn, 'r') as f:
|
||||||
|
|||||||
@@ -62,8 +62,6 @@ container_list() {
|
|||||||
"so-soc"
|
"so-soc"
|
||||||
"so-steno"
|
"so-steno"
|
||||||
"so-strelka-backend"
|
"so-strelka-backend"
|
||||||
"so-strelka-filestream"
|
|
||||||
"so-strelka-frontend"
|
|
||||||
"so-strelka-manager"
|
"so-strelka-manager"
|
||||||
"so-suricata"
|
"so-suricata"
|
||||||
"so-telegraf"
|
"so-telegraf"
|
||||||
|
|||||||
@@ -158,6 +158,8 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
|
|||||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index lifecycle policy" # false positive (elasticsearch policy names contain 'error')
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index lifecycle policy" # false positive (elasticsearch policy names contain 'error')
|
||||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
|
||||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
|
||||||
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
|
||||||
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
|
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
|
||||||
@@ -220,6 +222,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
|
|||||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
|
||||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
|
||||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
|
||||||
|
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
|
||||||
fi
|
fi
|
||||||
|
|
||||||
RESULT=0
|
RESULT=0
|
||||||
@@ -266,6 +269,13 @@ for log_file in $(cat /tmp/log_check_files); do
|
|||||||
tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
|
tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
|
||||||
check_for_errors
|
check_for_errors
|
||||||
done
|
done
|
||||||
|
# Look for OOM specific errors in /var/log/messages which can lead to odd behavior / test failures
|
||||||
|
if [[ -f /var/log/messages ]]; then
|
||||||
|
status "Checking log file /var/log/messages"
|
||||||
|
if journalctl --since "24 hours ago" | grep -iE 'out of memory|oom-kill'; then
|
||||||
|
RESULT=1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
# Cleanup temp files
|
# Cleanup temp files
|
||||||
rm -f /tmp/log_check_files
|
rm -f /tmp/log_check_files
|
||||||
|
|||||||
53
salt/common/tools/sbin/so_logging_utils.py
Normal file
53
salt/common/tools/sbin/so_logging_utils.py
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
#!/usr/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
def setup_logging(logger_name, log_file_path, log_level=logging.INFO, format_str='%(asctime)s - %(levelname)s - %(message)s'):
|
||||||
|
"""
|
||||||
|
Sets up logging for a script.
|
||||||
|
|
||||||
|
Parameters:
|
||||||
|
logger_name (str): The name of the logger.
|
||||||
|
log_file_path (str): The file path for the log file.
|
||||||
|
log_level (int): The logging level (e.g., logging.INFO, logging.DEBUG).
|
||||||
|
format_str (str): The format string for log messages.
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
logging.Logger: Configured logger object.
|
||||||
|
"""
|
||||||
|
logger = logging.getLogger(logger_name)
|
||||||
|
logger.setLevel(log_level)
|
||||||
|
|
||||||
|
# Create directory for log file if it doesn't exist
|
||||||
|
log_file_dir = os.path.dirname(log_file_path)
|
||||||
|
if log_file_dir and not os.path.exists(log_file_dir):
|
||||||
|
try:
|
||||||
|
os.makedirs(log_file_dir)
|
||||||
|
except OSError as e:
|
||||||
|
print(f"Error creating directory {log_file_dir}: {e}")
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
# Create handlers
|
||||||
|
c_handler = logging.StreamHandler()
|
||||||
|
f_handler = logging.FileHandler(log_file_path)
|
||||||
|
c_handler.setLevel(log_level)
|
||||||
|
f_handler.setLevel(log_level)
|
||||||
|
|
||||||
|
# Create formatter and add it to handlers
|
||||||
|
formatter = logging.Formatter(format_str)
|
||||||
|
c_handler.setFormatter(formatter)
|
||||||
|
f_handler.setFormatter(formatter)
|
||||||
|
|
||||||
|
# Add handlers to the logger if they are not already added
|
||||||
|
if not logger.hasHandlers():
|
||||||
|
logger.addHandler(c_handler)
|
||||||
|
logger.addHandler(f_handler)
|
||||||
|
|
||||||
|
return logger
|
||||||
@@ -173,7 +173,7 @@ for PCAP in $INPUT_FILES; do
|
|||||||
status "- assigning unique identifier to import: $HASH"
|
status "- assigning unique identifier to import: $HASH"
|
||||||
|
|
||||||
pcap_data=$(pcapinfo "${PCAP}")
|
pcap_data=$(pcapinfo "${PCAP}")
|
||||||
if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then
|
if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then
|
||||||
status "- this PCAP file is invalid; skipping"
|
status "- this PCAP file is invalid; skipping"
|
||||||
INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
|
INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
|
||||||
else
|
else
|
||||||
@@ -205,8 +205,8 @@ for PCAP in $INPUT_FILES; do
|
|||||||
HASHES="${HASHES} ${HASH}"
|
HASHES="${HASHES} ${HASH}"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}')
|
START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}')
|
||||||
END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}')
|
END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}')
|
||||||
status "- found PCAP data spanning dates $START through $END"
|
status "- found PCAP data spanning dates $START through $END"
|
||||||
|
|
||||||
# compare $START to $START_OLDEST
|
# compare $START to $START_OLDEST
|
||||||
@@ -248,7 +248,7 @@ fi
|
|||||||
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
|
START_OLDEST_SLASH=$(echo $START_OLDEST | sed -e 's/-/%2F/g')
|
||||||
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
|
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
|
||||||
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
|
if [[ $VALID_PCAPS_COUNT -gt 0 ]] || [[ $SKIPPED_PCAPS_COUNT -gt 0 ]]; then
|
||||||
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source_geo.organization_name%20source.geo.country_name%20%7C%20groupby%20destination_geo.organization_name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
|
URL="https://{{ URLBASE }}/#/dashboards?q=$HASH_FILTERS%20%7C%20groupby%20event.module*%20%7C%20groupby%20-sankey%20event.module*%20event.dataset%20%7C%20groupby%20event.dataset%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20network.protocol%20%7C%20groupby%20rule.name%20rule.category%20event.severity_label%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20http.virtual_host%20http.uri%20%7C%20groupby%20notice.note%20notice.message%20notice.sub_message%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.as.organization.name%20source.geo.country_name%20%7C%20groupby%20destination.as.organization.name%20destination.geo.country_name&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC"
|
||||||
|
|
||||||
status "Import complete!"
|
status "Import complete!"
|
||||||
status
|
status
|
||||||
|
|||||||
@@ -0,0 +1,132 @@
|
|||||||
|
#!/opt/saltstack/salt/bin/python3
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
#
|
||||||
|
# Note: Per the Elastic License 2.0, the second limitation states:
|
||||||
|
#
|
||||||
|
# "You may not move, change, disable, or circumvent the license key functionality
|
||||||
|
# in the software, and you may not remove or obscure any functionality in the
|
||||||
|
# software that is protected by the license key."
|
||||||
|
|
||||||
|
{% if 'vrt' in salt['pillar.get']('features', []) -%}
|
||||||
|
|
||||||
|
"""
|
||||||
|
Script for emitting VM deployment status events to the Salt event bus.
|
||||||
|
|
||||||
|
This script provides functionality to emit status events for VM deployment operations,
|
||||||
|
used by various Security Onion VM management tools.
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
so-salt-emit-vm-deployment-status-event -v <vm_name> -H <hypervisor> -s <status>
|
||||||
|
|
||||||
|
Arguments:
|
||||||
|
-v, --vm-name Name of the VM (hostname_role)
|
||||||
|
-H, --hypervisor Name of the hypervisor
|
||||||
|
-s, --status Current deployment status of the VM
|
||||||
|
|
||||||
|
Example:
|
||||||
|
so-salt-emit-vm-deployment-status-event -v sensor1_sensor -H hypervisor1 -s "Creating"
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import argparse
|
||||||
|
import logging
|
||||||
|
import salt.client
|
||||||
|
from typing import Dict, Any
|
||||||
|
|
||||||
|
# Configure logging
|
||||||
|
logging.basicConfig(
|
||||||
|
level=logging.INFO,
|
||||||
|
format='%(asctime)s - %(levelname)s - %(message)s'
|
||||||
|
)
|
||||||
|
log = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
def emit_event(vm_name: str, hypervisor: str, status: str) -> bool:
|
||||||
|
"""
|
||||||
|
Emit a VM deployment status event to the salt event bus.
|
||||||
|
|
||||||
|
Args:
|
||||||
|
vm_name: Name of the VM (hostname_role)
|
||||||
|
hypervisor: Name of the hypervisor
|
||||||
|
status: Current deployment status of the VM
|
||||||
|
|
||||||
|
Returns:
|
||||||
|
bool: True if event was sent successfully, False otherwise
|
||||||
|
|
||||||
|
Raises:
|
||||||
|
ValueError: If status is not a valid deployment status
|
||||||
|
"""
|
||||||
|
log.info("Attempting to emit deployment event...")
|
||||||
|
|
||||||
|
try:
|
||||||
|
caller = salt.client.Caller()
|
||||||
|
event_data = {
|
||||||
|
'vm_name': vm_name,
|
||||||
|
'hypervisor': hypervisor,
|
||||||
|
'status': status
|
||||||
|
}
|
||||||
|
|
||||||
|
# Use consistent event tag structure
|
||||||
|
event_tag = f'soc/dyanno/hypervisor/{status.lower()}'
|
||||||
|
|
||||||
|
ret = caller.cmd(
|
||||||
|
'event.send',
|
||||||
|
event_tag,
|
||||||
|
event_data
|
||||||
|
)
|
||||||
|
|
||||||
|
if not ret:
|
||||||
|
log.error("Failed to emit VM deployment status event: %s", event_data)
|
||||||
|
return False
|
||||||
|
|
||||||
|
log.info("Successfully emitted VM deployment status event: %s", event_data)
|
||||||
|
return True
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log.error("Error emitting VM deployment status event: %s", str(e))
|
||||||
|
return False
|
||||||
|
|
||||||
|
def parse_args():
|
||||||
|
"""Parse command line arguments."""
|
||||||
|
parser = argparse.ArgumentParser(
|
||||||
|
description='Emit VM deployment status events to the Salt event bus.'
|
||||||
|
)
|
||||||
|
parser.add_argument('-v', '--vm-name', required=True,
|
||||||
|
help='Name of the VM (hostname_role)')
|
||||||
|
parser.add_argument('-H', '--hypervisor', required=True,
|
||||||
|
help='Name of the hypervisor')
|
||||||
|
parser.add_argument('-s', '--status', required=True,
|
||||||
|
help='Current deployment status of the VM')
|
||||||
|
return parser.parse_args()
|
||||||
|
|
||||||
|
def main():
|
||||||
|
"""Main entry point for the script."""
|
||||||
|
try:
|
||||||
|
args = parse_args()
|
||||||
|
|
||||||
|
success = emit_event(
|
||||||
|
vm_name=args.vm_name,
|
||||||
|
hypervisor=args.hypervisor,
|
||||||
|
status=args.status
|
||||||
|
)
|
||||||
|
|
||||||
|
if not success:
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
except Exception as e:
|
||||||
|
log.error("Failed to emit status event: %s", str(e))
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
||||||
|
|
||||||
|
{%- else -%}
|
||||||
|
|
||||||
|
echo "Hypervisor nodes are a feature supported only for customers with a valid license. \
|
||||||
|
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com \
|
||||||
|
for more information about purchasing a license to enable this feature."
|
||||||
|
|
||||||
|
{% endif -%}
|
||||||
@@ -9,3 +9,6 @@ fleetartifactdir:
|
|||||||
- user: 947
|
- user: 947
|
||||||
- group: 939
|
- group: 939
|
||||||
- makedirs: True
|
- makedirs: True
|
||||||
|
- recurse:
|
||||||
|
- user
|
||||||
|
- group
|
||||||
|
|||||||
@@ -9,6 +9,9 @@
|
|||||||
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
|
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
|
||||||
{% set node_data = salt['pillar.get']('node_data') %}
|
{% set node_data = salt['pillar.get']('node_data') %}
|
||||||
|
|
||||||
|
include:
|
||||||
|
- elasticfleet.artifact_registry
|
||||||
|
|
||||||
# Add EA Group
|
# Add EA Group
|
||||||
elasticfleetgroup:
|
elasticfleetgroup:
|
||||||
group.present:
|
group.present:
|
||||||
@@ -166,7 +169,7 @@ eaoptionalintegrationsdir:
|
|||||||
|
|
||||||
{% for minion in node_data %}
|
{% for minion in node_data %}
|
||||||
{% set role = node_data[minion]["role"] %}
|
{% set role = node_data[minion]["role"] %}
|
||||||
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
|
{% if role in [ "eval","fleet","heavynode","import","manager", "managerhype", "managersearch","standalone" ] %}
|
||||||
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
|
{% set optional_integrations = ELASTICFLEETMERGED.optional_integrations %}
|
||||||
{% set integration_keys = optional_integrations.keys() %}
|
{% set integration_keys = optional_integrations.keys() %}
|
||||||
fleet_server_integrations_{{ minion }}:
|
fleet_server_integrations_{{ minion }}:
|
||||||
|
|||||||
@@ -15,7 +15,6 @@ elasticfleet:
|
|||||||
logging:
|
logging:
|
||||||
zeek:
|
zeek:
|
||||||
excluded:
|
excluded:
|
||||||
- analyzer
|
|
||||||
- broker
|
- broker
|
||||||
- capture_loss
|
- capture_loss
|
||||||
- cluster
|
- cluster
|
||||||
@@ -38,6 +37,7 @@ elasticfleet:
|
|||||||
- elasticsearch
|
- elasticsearch
|
||||||
- endpoint
|
- endpoint
|
||||||
- fleet_server
|
- fleet_server
|
||||||
|
- filestream
|
||||||
- http_endpoint
|
- http_endpoint
|
||||||
- httpjson
|
- httpjson
|
||||||
- log
|
- log
|
||||||
|
|||||||
@@ -32,6 +32,16 @@ so-elastic-fleet-auto-configure-logstash-outputs:
|
|||||||
- retry:
|
- retry:
|
||||||
attempts: 4
|
attempts: 4
|
||||||
interval: 30
|
interval: 30
|
||||||
|
|
||||||
|
{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
|
||||||
|
so-elastic-fleet-auto-configure-logstash-outputs-force:
|
||||||
|
cmd.run:
|
||||||
|
- name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs
|
||||||
|
- retry:
|
||||||
|
attempts: 4
|
||||||
|
interval: 30
|
||||||
|
- onchanges:
|
||||||
|
- x509: etc_elasticfleet_logstash_crt
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
# If enabled, automatically update Fleet Server URLs & ES Connection
|
# If enabled, automatically update Fleet Server URLs & ES Connection
|
||||||
@@ -67,6 +77,8 @@ so-elastic-fleet-auto-configure-artifact-urls:
|
|||||||
elasticagent_syncartifacts:
|
elasticagent_syncartifacts:
|
||||||
file.recurse:
|
file.recurse:
|
||||||
- name: /nsm/elastic-fleet/artifacts/beats
|
- name: /nsm/elastic-fleet/artifacts/beats
|
||||||
|
- user: 947
|
||||||
|
- group: 947
|
||||||
- source: salt://beats
|
- source: salt://beats
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
@@ -133,12 +145,18 @@ so-elastic-fleet-package-statefile:
|
|||||||
so-elastic-fleet-package-upgrade:
|
so-elastic-fleet-package-upgrade:
|
||||||
cmd.run:
|
cmd.run:
|
||||||
- name: /usr/sbin/so-elastic-fleet-package-upgrade
|
- name: /usr/sbin/so-elastic-fleet-package-upgrade
|
||||||
|
- retry:
|
||||||
|
attempts: 3
|
||||||
|
interval: 10
|
||||||
- onchanges:
|
- onchanges:
|
||||||
- file: /opt/so/state/elastic_fleet_packages.txt
|
- file: /opt/so/state/elastic_fleet_packages.txt
|
||||||
|
|
||||||
so-elastic-fleet-integrations:
|
so-elastic-fleet-integrations:
|
||||||
cmd.run:
|
cmd.run:
|
||||||
- name: /usr/sbin/so-elastic-fleet-integration-policy-load
|
- name: /usr/sbin/so-elastic-fleet-integration-policy-load
|
||||||
|
- retry:
|
||||||
|
attempts: 3
|
||||||
|
interval: 10
|
||||||
|
|
||||||
so-elastic-agent-grid-upgrade:
|
so-elastic-agent-grid-upgrade:
|
||||||
cmd.run:
|
cmd.run:
|
||||||
@@ -150,7 +168,11 @@ so-elastic-agent-grid-upgrade:
|
|||||||
so-elastic-fleet-integration-upgrade:
|
so-elastic-fleet-integration-upgrade:
|
||||||
cmd.run:
|
cmd.run:
|
||||||
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
|
- name: /usr/sbin/so-elastic-fleet-integration-upgrade
|
||||||
|
- retry:
|
||||||
|
attempts: 3
|
||||||
|
interval: 10
|
||||||
|
|
||||||
|
{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
|
||||||
so-elastic-fleet-addon-integrations:
|
so-elastic-fleet-addon-integrations:
|
||||||
cmd.run:
|
cmd.run:
|
||||||
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load
|
- name: /usr/sbin/so-elastic-fleet-optional-integrations-load
|
||||||
|
|||||||
@@ -1,32 +1,33 @@
|
|||||||
{
|
{
|
||||||
"name": "elastic-defend-endpoints",
|
"name": "elastic-defend-endpoints",
|
||||||
"namespace": "default",
|
"namespace": "default",
|
||||||
"description": "",
|
"description": "",
|
||||||
"package": {
|
"package": {
|
||||||
"name": "endpoint",
|
"name": "endpoint",
|
||||||
"title": "Elastic Defend",
|
"title": "Elastic Defend",
|
||||||
"version": "8.17.0",
|
"version": "8.18.1",
|
||||||
"requires_root": true
|
"requires_root": true
|
||||||
},
|
},
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"policy_id": "endpoints-initial",
|
"policy_ids": [
|
||||||
"vars": {},
|
"endpoints-initial"
|
||||||
"inputs": [
|
],
|
||||||
{
|
"vars": {},
|
||||||
"type": "endpoint",
|
"inputs": [
|
||||||
"enabled": true,
|
{
|
||||||
"config": {
|
"type": "ENDPOINT_INTEGRATION_CONFIG",
|
||||||
"integration_config": {
|
"enabled": true,
|
||||||
"value": {
|
"config": {
|
||||||
"type": "endpoint",
|
"_config": {
|
||||||
"endpointConfig": {
|
"value": {
|
||||||
"preset": "DataCollection"
|
"type": "endpoint",
|
||||||
}
|
"endpointConfig": {
|
||||||
}
|
"preset": "DataCollection"
|
||||||
}
|
}
|
||||||
},
|
}
|
||||||
"streams": []
|
}
|
||||||
}
|
},
|
||||||
]
|
"streams": []
|
||||||
}
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -0,0 +1,48 @@
|
|||||||
|
{
|
||||||
|
"package": {
|
||||||
|
"name": "filestream",
|
||||||
|
"version": ""
|
||||||
|
},
|
||||||
|
"name": "agent-monitor",
|
||||||
|
"namespace": "",
|
||||||
|
"description": "",
|
||||||
|
"policy_ids": [
|
||||||
|
"so-grid-nodes_general"
|
||||||
|
],
|
||||||
|
"output_id": null,
|
||||||
|
"vars": {},
|
||||||
|
"inputs": {
|
||||||
|
"filestream-filestream": {
|
||||||
|
"enabled": true,
|
||||||
|
"streams": {
|
||||||
|
"filestream.generic": {
|
||||||
|
"enabled": true,
|
||||||
|
"vars": {
|
||||||
|
"paths": [
|
||||||
|
"/opt/so/log/agents/agent-monitor.log"
|
||||||
|
],
|
||||||
|
"data_stream.dataset": "agentmonitor",
|
||||||
|
"pipeline": "elasticagent.monitor",
|
||||||
|
"parsers": "",
|
||||||
|
"exclude_files": [
|
||||||
|
"\\.gz$"
|
||||||
|
],
|
||||||
|
"include_files": [],
|
||||||
|
"processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: gridmetrics",
|
||||||
|
"tags": [],
|
||||||
|
"recursive_glob": true,
|
||||||
|
"ignore_older": "72h",
|
||||||
|
"clean_inactive": -1,
|
||||||
|
"harvester_limit": 0,
|
||||||
|
"fingerprint": true,
|
||||||
|
"fingerprint_offset": 0,
|
||||||
|
"fingerprint_length": 64,
|
||||||
|
"file_identity_native": false,
|
||||||
|
"exclude_lines": [],
|
||||||
|
"include_lines": []
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -40,7 +40,7 @@
|
|||||||
"enabled": true,
|
"enabled": true,
|
||||||
"vars": {
|
"vars": {
|
||||||
"paths": [
|
"paths": [
|
||||||
"/opt/so/log/elasticsearch/*.log"
|
"/opt/so/log/elasticsearch/*.json"
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
|||||||
@@ -19,7 +19,7 @@
|
|||||||
],
|
],
|
||||||
"data_stream.dataset": "idh",
|
"data_stream.dataset": "idh",
|
||||||
"tags": [],
|
"tags": [],
|
||||||
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- drop_fields:\n when:\n equals:\n logtype: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
|
"processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
|
||||||
"custom": "pipeline: common"
|
"custom": "pipeline: common"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,7 +20,7 @@
|
|||||||
],
|
],
|
||||||
"data_stream.dataset": "import",
|
"data_stream.dataset": "import",
|
||||||
"custom": "",
|
"custom": "",
|
||||||
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-1.67.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-2.5.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-1.67.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-1.67.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-2.5.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
|
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
|
||||||
"tags": [
|
"tags": [
|
||||||
"import"
|
"import"
|
||||||
]
|
]
|
||||||
|
|||||||
@@ -11,7 +11,7 @@
|
|||||||
"tcp-tcp": {
|
"tcp-tcp": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"streams": {
|
"streams": {
|
||||||
"tcp.generic": {
|
"tcp.tcp": {
|
||||||
"enabled": true,
|
"enabled": true,
|
||||||
"vars": {
|
"vars": {
|
||||||
"listen_address": "0.0.0.0",
|
"listen_address": "0.0.0.0",
|
||||||
@@ -23,7 +23,8 @@
|
|||||||
"syslog"
|
"syslog"
|
||||||
],
|
],
|
||||||
"syslog_options": "field: message\n#format: auto\n#timezone: Local",
|
"syslog_options": "field: message\n#format: auto\n#timezone: Local",
|
||||||
"ssl": ""
|
"ssl": "",
|
||||||
|
"custom": ""
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,26 +2,30 @@
|
|||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
|
||||||
# this file except in compliance with the Elastic License 2.0.
|
# this file except in compliance with the Elastic License 2.0.
|
||||||
|
|
||||||
{%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
|
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
|
||||||
{%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
|
{% if grains.role == 'so-heavynode' %}
|
||||||
|
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
|
{% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
|
||||||
{% if not AGENT_STATUS %}
|
{% if not AGENT_STATUS %}
|
||||||
|
|
||||||
{% if grains.role not in ['so-heavynode'] %}
|
pull_agent_installer:
|
||||||
run_installer:
|
file.managed:
|
||||||
cmd.script:
|
- name: /opt/so/so-elastic-agent_linux_amd64
|
||||||
- name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
|
- source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
|
||||||
- cwd: /opt/so
|
- mode: 755
|
||||||
- args: -token={{ GRIDNODETOKENGENERAL }}
|
- makedirs: True
|
||||||
- retry: True
|
|
||||||
{% else %}
|
|
||||||
run_installer:
|
|
||||||
cmd.script:
|
|
||||||
- name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
|
|
||||||
- cwd: /opt/so
|
|
||||||
- args: -token={{ GRIDNODETOKENHEAVY }}
|
|
||||||
- retry: True
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
|
run_installer:
|
||||||
|
cmd.run:
|
||||||
|
- name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }}
|
||||||
|
- cwd: /opt/so
|
||||||
|
- retry:
|
||||||
|
attempts: 3
|
||||||
|
interval: 20
|
||||||
|
|
||||||
|
cleanup_agent_installer:
|
||||||
|
file.absent:
|
||||||
|
- name: /opt/so/so-elastic-agent_linux_amd64
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|||||||
@@ -4,6 +4,7 @@
|
|||||||
|
|
||||||
|
|
||||||
{% import_json '/opt/so/state/esfleet_package_components.json' as ADDON_PACKAGE_COMPONENTS %}
|
{% import_json '/opt/so/state/esfleet_package_components.json' as ADDON_PACKAGE_COMPONENTS %}
|
||||||
|
{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
|
||||||
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
|
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
|
||||||
|
|
||||||
{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
|
{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
|
||||||
@@ -14,6 +15,7 @@
|
|||||||
'awsfirehose.logs': 'awsfirehose',
|
'awsfirehose.logs': 'awsfirehose',
|
||||||
'awsfirehose.metrics': 'aws.cloudwatch',
|
'awsfirehose.metrics': 'aws.cloudwatch',
|
||||||
'cribl.logs': 'cribl',
|
'cribl.logs': 'cribl',
|
||||||
|
'cribl.metrics': 'cribl',
|
||||||
'sentinel_one_cloud_funnel.logins': 'sentinel_one_cloud_funnel.login',
|
'sentinel_one_cloud_funnel.logins': 'sentinel_one_cloud_funnel.login',
|
||||||
'azure_application_insights.app_insights': 'azure.app_insights',
|
'azure_application_insights.app_insights': 'azure.app_insights',
|
||||||
'azure_application_insights.app_state': 'azure.app_state',
|
'azure_application_insights.app_state': 'azure.app_state',
|
||||||
@@ -45,7 +47,10 @@
|
|||||||
'synthetics.browser_screenshot': 'synthetics-browser.screenshot',
|
'synthetics.browser_screenshot': 'synthetics-browser.screenshot',
|
||||||
'synthetics.http': 'synthetics-http',
|
'synthetics.http': 'synthetics-http',
|
||||||
'synthetics.icmp': 'synthetics-icmp',
|
'synthetics.icmp': 'synthetics-icmp',
|
||||||
'synthetics.tcp': 'synthetics-tcp'
|
'synthetics.tcp': 'synthetics-tcp',
|
||||||
|
'swimlane.swimlane_api': 'swimlane.api',
|
||||||
|
'swimlane.tenant_api': 'swimlane.tenant',
|
||||||
|
'swimlane.turbine_api': 'turbine.api'
|
||||||
} %}
|
} %}
|
||||||
|
|
||||||
{% for pkg in ADDON_PACKAGE_COMPONENTS %}
|
{% for pkg in ADDON_PACKAGE_COMPONENTS %}
|
||||||
@@ -62,70 +67,90 @@
|
|||||||
{% else %}
|
{% else %}
|
||||||
{% set integration_type = "" %}
|
{% set integration_type = "" %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% set component_name = pkg.name ~ "." ~ pattern.title %}
|
{% set component_name = pkg.name ~ "." ~ pattern.title %}
|
||||||
{# fix weirdly named components #}
|
{% set index_pattern = pattern.name %}
|
||||||
{% if component_name in WEIRD_INTEGRATIONS %}
|
|
||||||
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
|
{# fix weirdly named components #}
|
||||||
{% endif %}
|
{% if component_name in WEIRD_INTEGRATIONS %}
|
||||||
|
{% set component_name = WEIRD_INTEGRATIONS[component_name] %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
|
{# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed
|
||||||
|
eg. having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #}
|
||||||
|
{% set custom_component_name = component_name %}
|
||||||
|
|
||||||
|
{# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #}
|
||||||
|
{% set generic_integration_type = integration_type %}
|
||||||
|
|
||||||
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
|
{# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
|
||||||
{% set component_name_x = component_name.replace(".","_x_") %}
|
{% set component_name_x = component_name.replace(".","_x_") %}
|
||||||
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
|
{# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
|
||||||
{% set integration_key = "so-" ~ integration_type ~ component_name_x %}
|
{% set integration_key = "so-" ~ integration_type ~ component_name_x %}
|
||||||
|
|
||||||
|
{# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #}
|
||||||
|
{% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %}
|
||||||
|
{# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #}
|
||||||
|
{% set index_pattern = integration_type ~ component_name ~ "-*" %}
|
||||||
|
{# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #}
|
||||||
|
{% set component_name = "filestream.generic" %}
|
||||||
|
{% set generic_integration_type = "logs-" %}
|
||||||
|
{% endif %}
|
||||||
|
|
||||||
{# Default integration settings #}
|
{# Default integration settings #}
|
||||||
{% set integration_defaults = {
|
{% set integration_defaults = {
|
||||||
"index_sorting": false,
|
"index_sorting": false,
|
||||||
"index_template": {
|
"index_template": {
|
||||||
"composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
|
"composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
|
||||||
"data_stream": {
|
"data_stream": {
|
||||||
"allow_custom_routing": false,
|
"allow_custom_routing": false,
|
||||||
"hidden": false
|
"hidden": false
|
||||||
},
|
},
|
||||||
"ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
|
"ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"],
|
||||||
"index_patterns": [pattern.name],
|
"index_patterns": [index_pattern],
|
||||||
"priority": 501,
|
"priority": 501,
|
||||||
"template": {
|
"template": {
|
||||||
"settings": {
|
"settings": {
|
||||||
"index": {
|
"index": {
|
||||||
"lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
|
"lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"},
|
||||||
"number_of_replicas": 0
|
"number_of_replicas": 0
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"policy": {
|
"policy": {
|
||||||
"phases": {
|
"phases": {
|
||||||
"cold": {
|
"cold": {
|
||||||
"actions": {
|
"actions": {
|
||||||
"set_priority": {"priority": 0}
|
"set_priority": {"priority": 0}
|
||||||
},
|
},
|
||||||
"min_age": "60d"
|
"min_age": "60d"
|
||||||
|
},
|
||||||
|
"delete": {
|
||||||
|
"actions": {
|
||||||
|
"delete": {}
|
||||||
|
},
|
||||||
|
"min_age": "365d"
|
||||||
|
},
|
||||||
|
"hot": {
|
||||||
|
"actions": {
|
||||||
|
"rollover": {
|
||||||
|
"max_age": "30d",
|
||||||
|
"max_primary_shard_size": "50gb"
|
||||||
|
},
|
||||||
|
"set_priority": {"priority": 100}
|
||||||
},
|
},
|
||||||
"delete": {
|
"min_age": "0ms"
|
||||||
"actions": {
|
},
|
||||||
"delete": {}
|
"warm": {
|
||||||
},
|
"actions": {
|
||||||
"min_age": "365d"
|
"set_priority": {"priority": 50}
|
||||||
},
|
},
|
||||||
"hot": {
|
"min_age": "30d"
|
||||||
"actions": {
|
}
|
||||||
"rollover": {
|
}
|
||||||
"max_age": "30d",
|
}
|
||||||
"max_primary_shard_size": "50gb"
|
} %}
|
||||||
},
|
|
||||||
"set_priority": {"priority": 100}
|
|
||||||
},
|
|
||||||
"min_age": "0ms"
|
|
||||||
},
|
|
||||||
"warm": {
|
|
||||||
"actions": {
|
|
||||||
"set_priority": {"priority": 50}
|
|
||||||
},
|
|
||||||
"min_age": "30d"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} %}
|
|
||||||
{% do ADDON_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
|
{% do ADDON_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|||||||
@@ -23,6 +23,13 @@ fi
|
|||||||
# Define a banner to separate sections
|
# Define a banner to separate sections
|
||||||
banner="========================================================================="
|
banner="========================================================================="
|
||||||
|
|
||||||
|
fleet_api() {
|
||||||
|
local QUERYPATH=$1
|
||||||
|
shift
|
||||||
|
|
||||||
|
curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
|
||||||
|
}
|
||||||
|
|
||||||
elastic_fleet_integration_check() {
|
elastic_fleet_integration_check() {
|
||||||
|
|
||||||
AGENT_POLICY=$1
|
AGENT_POLICY=$1
|
||||||
@@ -39,7 +46,9 @@ elastic_fleet_integration_create() {
|
|||||||
|
|
||||||
JSON_STRING=$1
|
JSON_STRING=$1
|
||||||
|
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -d "$JSON_STRING"; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -56,7 +65,10 @@ elastic_fleet_integration_remove() {
|
|||||||
'{"packagePolicyIds":[$INTEGRATIONID]}'
|
'{"packagePolicyIds":[$INTEGRATIONID]}'
|
||||||
)
|
)
|
||||||
|
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "package_policies/delete" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||||
|
echo "Error: Unable to delete '$NAME' from '$AGENT_POLICY'"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_integration_update() {
|
elastic_fleet_integration_update() {
|
||||||
@@ -65,7 +77,9 @@ elastic_fleet_integration_update() {
|
|||||||
|
|
||||||
JSON_STRING=$2
|
JSON_STRING=$2
|
||||||
|
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPUT -d "$JSON_STRING"; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_integration_policy_upgrade() {
|
elastic_fleet_integration_policy_upgrade() {
|
||||||
@@ -77,101 +91,116 @@ elastic_fleet_integration_policy_upgrade() {
|
|||||||
'{"packagePolicyIds":[$INTEGRATIONID]}'
|
'{"packagePolicyIds":[$INTEGRATIONID]}'
|
||||||
)
|
)
|
||||||
|
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "package_policies/upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
elastic_fleet_package_version_check() {
|
elastic_fleet_package_version_check() {
|
||||||
PACKAGE=$1
|
PACKAGE=$1
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'
|
|
||||||
|
if output=$(fleet_api "epm/packages/$PACKAGE"); then
|
||||||
|
echo "$output" | jq -r '.item.version'
|
||||||
|
else
|
||||||
|
echo "Error: Failed to get current package version for '$PACKAGE'"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_package_latest_version_check() {
|
elastic_fleet_package_latest_version_check() {
|
||||||
PACKAGE=$1
|
PACKAGE=$1
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.latestVersion'
|
if output=$(fleet_api "epm/packages/$PACKAGE"); then
|
||||||
|
if version=$(jq -e -r '.item.latestVersion' <<< $output); then
|
||||||
|
echo "$version"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Error: Failed to get latest version for '$PACKAGE'"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_package_install() {
|
elastic_fleet_package_install() {
|
||||||
PKG=$1
|
PKG=$1
|
||||||
VERSION=$2
|
VERSION=$2
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION"
|
if ! fleet_api "epm/packages/$PKG/$VERSION" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}'; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_bulk_package_install() {
|
elastic_fleet_bulk_package_install() {
|
||||||
BULK_PKG_LIST=$1
|
BULK_PKG_LIST=$1
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$1 "localhost:5601/api/fleet/epm/packages/_bulk"
|
if ! fleet_api "epm/packages/_bulk" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$BULK_PKG_LIST; then
|
||||||
}
|
return 1
|
||||||
|
|
||||||
elastic_fleet_package_is_installed() {
|
|
||||||
PACKAGE=$1
|
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
|
|
||||||
}
|
|
||||||
|
|
||||||
elastic_fleet_installed_packages() {
|
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=500"
|
|
||||||
}
|
|
||||||
|
|
||||||
elastic_fleet_agent_policy_ids() {
|
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].id
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "Error: Failed to retrieve agent policies."
|
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_agent_policy_names() {
|
elastic_fleet_installed_packages() {
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].name
|
if ! fleet_api "epm/packages/installed?perPage=500"; then
|
||||||
if [ $? -ne 0 ]; then
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
elastic_fleet_agent_policy_ids() {
|
||||||
|
if output=$(fleet_api "agent_policies"); then
|
||||||
|
echo "$output" | jq -r .items[].id
|
||||||
|
else
|
||||||
echo "Error: Failed to retrieve agent policies."
|
echo "Error: Failed to retrieve agent policies."
|
||||||
exit 1
|
return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_integration_policy_names() {
|
elastic_fleet_integration_policy_names() {
|
||||||
AGENT_POLICY=$1
|
AGENT_POLICY=$1
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r .item.package_policies[].name
|
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
|
||||||
if [ $? -ne 0 ]; then
|
echo "$output" | jq -r .item.package_policies[].name
|
||||||
|
else
|
||||||
echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
|
echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
|
||||||
exit 1
|
return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_integration_policy_package_name() {
|
elastic_fleet_integration_policy_package_name() {
|
||||||
AGENT_POLICY=$1
|
AGENT_POLICY=$1
|
||||||
INTEGRATION=$2
|
INTEGRATION=$2
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
|
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
|
||||||
if [ $? -ne 0 ]; then
|
echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
|
||||||
|
else
|
||||||
echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
|
echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
|
||||||
exit 1
|
return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_integration_policy_package_version() {
|
elastic_fleet_integration_policy_package_version() {
|
||||||
AGENT_POLICY=$1
|
AGENT_POLICY=$1
|
||||||
INTEGRATION=$2
|
INTEGRATION=$2
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version'
|
|
||||||
if [ $? -ne 0 ]; then
|
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
|
||||||
echo "Error: Failed to retrieve package version for '$INTEGRATION' in '$AGENT_POLICY'."
|
if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< "$output"); then
|
||||||
exit 1
|
echo "$version"
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo "Error: Failed to retrieve integration version for '$INTEGRATION' in policy '$AGENT_POLICY'"
|
||||||
|
return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_integration_id() {
|
elastic_fleet_integration_id() {
|
||||||
AGENT_POLICY=$1
|
AGENT_POLICY=$1
|
||||||
INTEGRATION=$2
|
INTEGRATION=$2
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
|
if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
|
||||||
if [ $? -ne 0 ]; then
|
echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
|
||||||
|
else
|
||||||
echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
|
echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
|
||||||
exit 1
|
return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_integration_policy_dryrun_upgrade() {
|
elastic_fleet_integration_policy_dryrun_upgrade() {
|
||||||
INTEGRATION_ID=$1
|
INTEGRATION_ID=$1
|
||||||
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -L -X POST "localhost:5601/api/fleet/package_policies/upgrade/dryrun" -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"
|
if ! fleet_api "package_policies/upgrade/dryrun" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -XPOST -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"; then
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
|
echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
|
||||||
exit 1
|
return 1
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -180,25 +209,18 @@ elastic_fleet_policy_create() {
|
|||||||
NAME=$1
|
NAME=$1
|
||||||
DESC=$2
|
DESC=$2
|
||||||
FLEETSERVER=$3
|
FLEETSERVER=$3
|
||||||
TIMEOUT=$4
|
TIMEOUT=$4
|
||||||
|
|
||||||
JSON_STRING=$( jq -n \
|
JSON_STRING=$( jq -n \
|
||||||
--arg NAME "$NAME" \
|
--arg NAME "$NAME" \
|
||||||
--arg DESC "$DESC" \
|
--arg DESC "$DESC" \
|
||||||
--arg TIMEOUT $TIMEOUT \
|
--arg TIMEOUT $TIMEOUT \
|
||||||
--arg FLEETSERVER "$FLEETSERVER" \
|
--arg FLEETSERVER "$FLEETSERVER" \
|
||||||
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
|
'{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
|
||||||
)
|
)
|
||||||
# Create Fleet Policy
|
# Create Fleet Policy
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
elastic_fleet_policy_update() {
|
|
||||||
|
|
||||||
POLICYID=$1
|
|
||||||
JSON_STRING=$2
|
|
||||||
|
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|||||||
@@ -8,6 +8,7 @@
|
|||||||
|
|
||||||
. /usr/sbin/so-elastic-fleet-common
|
. /usr/sbin/so-elastic-fleet-common
|
||||||
|
|
||||||
|
ERROR=false
|
||||||
# Manage Elastic Defend Integration for Initial Endpoints Policy
|
# Manage Elastic Defend Integration for Initial Endpoints Policy
|
||||||
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
|
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
|
||||||
do
|
do
|
||||||
@@ -15,9 +16,20 @@ do
|
|||||||
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
|
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
|
||||||
if [ -n "$INTEGRATION_ID" ]; then
|
if [ -n "$INTEGRATION_ID" ]; then
|
||||||
printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
|
printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
|
||||||
elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
|
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
|
||||||
|
echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
|
||||||
|
ERROR=true
|
||||||
|
continue
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
printf "\n\nIntegration does not exist - Creating integration\n"
|
printf "\n\nIntegration does not exist - Creating integration\n"
|
||||||
elastic_fleet_integration_create "@$INTEGRATION"
|
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
|
||||||
|
ERROR=true
|
||||||
|
continue
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
if [[ "$ERROR" == "true" ]]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
@@ -25,5 +25,9 @@ for POLICYNAME in $POLICY; do
|
|||||||
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
|
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
|
||||||
|
|
||||||
# Now update the integration policy using the modified JSON
|
# Now update the integration policy using the modified JSON
|
||||||
elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
|
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"; then
|
||||||
|
# exit 1 on failure to update fleet integration policies, let salt handle retries
|
||||||
|
echo "Failed to update $POLICYNAME.."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
done
|
done
|
||||||
@@ -13,11 +13,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
|
|||||||
/usr/sbin/so-elastic-fleet-package-upgrade
|
/usr/sbin/so-elastic-fleet-package-upgrade
|
||||||
|
|
||||||
# Second, update Fleet Server policies
|
# Second, update Fleet Server policies
|
||||||
/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
|
/usr/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
|
||||||
|
|
||||||
# Third, configure Elastic Defend Integration seperately
|
# Third, configure Elastic Defend Integration seperately
|
||||||
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
|
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
|
||||||
|
|
||||||
# Initial Endpoints
|
# Initial Endpoints
|
||||||
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
|
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
|
||||||
do
|
do
|
||||||
@@ -25,10 +24,18 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
|
|||||||
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
|
elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
|
||||||
if [ -n "$INTEGRATION_ID" ]; then
|
if [ -n "$INTEGRATION_ID" ]; then
|
||||||
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
||||||
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
|
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
|
||||||
|
RETURN_CODE=1
|
||||||
|
continue
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
printf "\n\nIntegration does not exist - Creating integration\n"
|
printf "\n\nIntegration does not exist - Creating integration\n"
|
||||||
elastic_fleet_integration_create "@$INTEGRATION"
|
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
|
||||||
|
RETURN_CODE=1
|
||||||
|
continue
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
@@ -39,10 +46,18 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
|
|||||||
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
|
elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
|
||||||
if [ -n "$INTEGRATION_ID" ]; then
|
if [ -n "$INTEGRATION_ID" ]; then
|
||||||
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
||||||
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
|
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
|
||||||
|
RETURN_CODE=1
|
||||||
|
continue
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
printf "\n\nIntegration does not exist - Creating integration\n"
|
printf "\n\nIntegration does not exist - Creating integration\n"
|
||||||
elastic_fleet_integration_create "@$INTEGRATION"
|
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
|
||||||
|
RETURN_CODE=1
|
||||||
|
continue
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
if [[ "$RETURN_CODE" != "1" ]]; then
|
if [[ "$RETURN_CODE" != "1" ]]; then
|
||||||
@@ -56,11 +71,19 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
|
|||||||
elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
|
elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
|
||||||
if [ -n "$INTEGRATION_ID" ]; then
|
if [ -n "$INTEGRATION_ID" ]; then
|
||||||
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
||||||
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
|
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
|
||||||
|
RETURN_CODE=1
|
||||||
|
continue
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
printf "\n\nIntegration does not exist - Creating integration\n"
|
printf "\n\nIntegration does not exist - Creating integration\n"
|
||||||
if [ "$NAME" != "elasticsearch-logs" ]; then
|
if [ "$NAME" != "elasticsearch-logs" ]; then
|
||||||
elastic_fleet_integration_create "@$INTEGRATION"
|
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
|
||||||
|
RETURN_CODE=1
|
||||||
|
continue
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
@@ -77,11 +100,19 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
|
|||||||
elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
|
elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
|
||||||
if [ -n "$INTEGRATION_ID" ]; then
|
if [ -n "$INTEGRATION_ID" ]; then
|
||||||
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
printf "\n\nIntegration $NAME exists - Updating integration\n"
|
||||||
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
|
if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
|
||||||
|
RETURN_CODE=1
|
||||||
|
continue
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
printf "\n\nIntegration does not exist - Creating integration\n"
|
printf "\n\nIntegration does not exist - Creating integration\n"
|
||||||
if [ "$NAME" != "elasticsearch-logs" ]; then
|
if [ "$NAME" != "elasticsearch-logs" ]; then
|
||||||
elastic_fleet_integration_create "@$INTEGRATION"
|
if ! elastic_fleet_integration_create "@$INTEGRATION"; then
|
||||||
|
echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
|
||||||
|
RETURN_CODE=1
|
||||||
|
continue
|
||||||
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
|
|||||||
@@ -24,23 +24,39 @@ fi
|
|||||||
|
|
||||||
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
|
default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})
|
||||||
|
|
||||||
|
ERROR=false
|
||||||
for AGENT_POLICY in $agent_policies; do
|
for AGENT_POLICY in $agent_policies; do
|
||||||
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
|
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
|
||||||
|
# this script upgrades default integration packages, exit 1 and let salt handle retrying
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
for INTEGRATION in $integrations; do
|
for INTEGRATION in $integrations; do
|
||||||
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
|
if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
|
||||||
# Get package name so we know what package to look for when checking the current and latest available version
|
# Get package name so we know what package to look for when checking the current and latest available version
|
||||||
PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
|
if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
||||||
if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
|
if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
# Get currently installed version of package
|
# Get currently installed version of package
|
||||||
PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION")
|
attempt=0
|
||||||
|
max_attempts=3
|
||||||
# Get latest available version of package
|
while [ $attempt -lt $max_attempts ]; do
|
||||||
AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME")
|
if PACKAGE_VERSION=$(elastic_fleet_integration_policy_package_version "$AGENT_POLICY" "$INTEGRATION") && AVAILABLE_VERSION=$(elastic_fleet_package_latest_version_check "$PACKAGE_NAME"); then
|
||||||
|
break
|
||||||
|
fi
|
||||||
|
attempt=$((attempt + 1))
|
||||||
|
done
|
||||||
|
if [ $attempt -eq $max_attempts ]; then
|
||||||
|
echo "Error: Failed getting $PACKAGE_VERSION or $AVAILABLE_VERSION"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Get integration ID
|
# Get integration ID
|
||||||
INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
|
if ! INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION"); then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
|
if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
|
||||||
# Dry run of the upgrade
|
# Dry run of the upgrade
|
||||||
@@ -48,20 +64,23 @@ for AGENT_POLICY in $agent_policies; do
|
|||||||
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
|
echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
|
||||||
echo "Upgrading $INTEGRATION..."
|
echo "Upgrading $INTEGRATION..."
|
||||||
echo "Starting dry run..."
|
echo "Starting dry run..."
|
||||||
DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
|
if ! DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
|
DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)
|
||||||
|
|
||||||
# If no errors with dry run, proceed with actual upgrade
|
# If no errors with dry run, proceed with actual upgrade
|
||||||
if [[ "$DRYRUN_ERRORS" == "false" ]]; then
|
if [[ "$DRYRUN_ERRORS" == "false" ]]; then
|
||||||
echo "No errors detected. Proceeding with upgrade..."
|
echo "No errors detected. Proceeding with upgrade..."
|
||||||
elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
|
if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
|
echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
|
||||||
exit 1
|
ERROR=true
|
||||||
|
continue
|
||||||
fi
|
fi
|
||||||
else
|
else
|
||||||
echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
|
echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
|
||||||
exit 1
|
ERROR=true
|
||||||
|
continue
|
||||||
fi
|
fi
|
||||||
fi
|
fi
|
||||||
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
|
||||||
@@ -70,4 +89,7 @@ for AGENT_POLICY in $agent_policies; do
|
|||||||
fi
|
fi
|
||||||
done
|
done
|
||||||
done
|
done
|
||||||
|
if [[ "$ERROR" == "true" ]]; then
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
echo
|
echo
|
||||||
|
|||||||
@@ -19,6 +19,7 @@ BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
|
|||||||
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
|
BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
|
||||||
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
|
BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
|
||||||
PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
|
PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
|
||||||
|
COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json
|
||||||
|
|
||||||
PENDING_UPDATE=false
|
PENDING_UPDATE=false
|
||||||
|
|
||||||
@@ -61,9 +62,17 @@ default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.l
|
|||||||
in_use_integrations=()
|
in_use_integrations=()
|
||||||
|
|
||||||
for AGENT_POLICY in $agent_policies; do
|
for AGENT_POLICY in $agent_policies; do
|
||||||
integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
|
|
||||||
|
if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
|
||||||
|
# skip the agent policy if we can't get required info, let salt retry. Integrations loaded by this script are non-default integrations.
|
||||||
|
echo "Skipping $AGENT_POLICY.. "
|
||||||
|
continue
|
||||||
|
fi
|
||||||
for INTEGRATION in $integrations; do
|
for INTEGRATION in $integrations; do
|
||||||
PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
|
if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
|
||||||
|
echo "Not adding $INTEGRATION, couldn't get package name"
|
||||||
|
continue
|
||||||
|
fi
|
||||||
# non-default integrations that are in-use in any policy
|
# non-default integrations that are in-use in any policy
|
||||||
if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
|
if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
|
||||||
in_use_integrations+=("$PACKAGE_NAME")
|
in_use_integrations+=("$PACKAGE_NAME")
|
||||||
@@ -147,14 +156,38 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
|
|||||||
done <<< "$(jq -c '.packages[]' "$INSTALLED_PACKAGE_LIST")"
|
done <<< "$(jq -c '.packages[]' "$INSTALLED_PACKAGE_LIST")"
|
||||||
|
|
||||||
if [ "$PENDING_UPDATE" = true ]; then
|
if [ "$PENDING_UPDATE" = true ]; then
|
||||||
# Run bulk install of packages
|
# Run chunked install of packages
|
||||||
elastic_fleet_bulk_package_install $BULK_INSTALL_PACKAGE_LIST > $BULK_INSTALL_OUTPUT
|
echo "" > $BULK_INSTALL_OUTPUT
|
||||||
|
pkg_group=1
|
||||||
|
pkg_filename="${BULK_INSTALL_PACKAGE_LIST%.json}"
|
||||||
|
|
||||||
|
jq -c '.packages | _nwise(25)' $BULK_INSTALL_PACKAGE_LIST | while read -r line; do
|
||||||
|
echo "$line" | jq '{ "packages": . }' > "${pkg_filename}_${pkg_group}.json"
|
||||||
|
pkg_group=$((pkg_group + 1))
|
||||||
|
done
|
||||||
|
|
||||||
|
for file in "${pkg_filename}_"*.json; do
|
||||||
|
[ -e "$file" ] || continue
|
||||||
|
if ! elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT; then
|
||||||
|
# integrations loaded my this script are non-essential and shouldn't cause exit, skip them for now next highstate run can retry
|
||||||
|
echo "Failed to complete a chunk of bulk package installs -- $file "
|
||||||
|
continue
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
# cleanup any temp files for chunked package install
|
||||||
|
rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
|
||||||
else
|
else
|
||||||
echo "Elastic integrations don't appear to need installation/updating..."
|
echo "Elastic integrations don't appear to need installation/updating..."
|
||||||
fi
|
fi
|
||||||
# Write out file for generating index/component/ilm templates
|
# Write out file for generating index/component/ilm templates
|
||||||
latest_installed_package_list=$(elastic_fleet_installed_packages)
|
if latest_installed_package_list=$(elastic_fleet_installed_packages); then
|
||||||
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
|
echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
|
||||||
|
fi
|
||||||
|
if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
|
||||||
|
# Refresh installed component template list
|
||||||
|
latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
|
||||||
|
echo $latest_component_templates_list > $COMPONENT_TEMPLATES
|
||||||
|
fi
|
||||||
|
|
||||||
else
|
else
|
||||||
# This is the installation of add-on integrations and upgrade of existing integrations. Exiting without error, next highstate will attempt to re-run.
|
# This is the installation of add-on integrations and upgrade of existing integrations. Exiting without error, next highstate will attempt to re-run.
|
||||||
|
|||||||
@@ -8,6 +8,27 @@
|
|||||||
|
|
||||||
. /usr/sbin/so-common
|
. /usr/sbin/so-common
|
||||||
|
|
||||||
|
FORCE_UPDATE=false
|
||||||
|
UPDATE_CERTS=false
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case $1 in
|
||||||
|
-f|--force)
|
||||||
|
FORCE_UPDATE=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
-c| --certs)
|
||||||
|
UPDATE_CERTS=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unknown option $1"
|
||||||
|
echo "Usage: $0 [-f|--force] [-c|--certs]"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
# Only run on Managers
|
# Only run on Managers
|
||||||
if ! is_manager_node; then
|
if ! is_manager_node; then
|
||||||
printf "Not a Manager Node... Exiting"
|
printf "Not a Manager Node... Exiting"
|
||||||
@@ -15,22 +36,74 @@ if ! is_manager_node; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
function update_logstash_outputs() {
|
function update_logstash_outputs() {
|
||||||
# Generate updated JSON payload
|
if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
|
||||||
JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
|
SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
|
||||||
|
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
|
||||||
|
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
|
||||||
|
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
|
||||||
|
if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
|
||||||
|
if [[ "$UPDATE_CERTS" != "true" ]]; then
|
||||||
|
# Reuse existing secret
|
||||||
|
JSON_STRING=$(jq -n \
|
||||||
|
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||||
|
--argjson SECRETS "$SECRETS" \
|
||||||
|
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
||||||
|
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
|
||||||
|
else
|
||||||
|
# Update certs, creating new secret
|
||||||
|
JSON_STRING=$(jq -n \
|
||||||
|
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||||
|
--arg LOGSTASHKEY "$LOGSTASHKEY" \
|
||||||
|
--arg LOGSTASHCRT "$LOGSTASHCRT" \
|
||||||
|
--arg LOGSTASHCA "$LOGSTASHCA" \
|
||||||
|
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
if [[ "$UPDATE_CERTS" != "true" ]]; then
|
||||||
|
# Reuse existing ssl config
|
||||||
|
JSON_STRING=$(jq -n \
|
||||||
|
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||||
|
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
||||||
|
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}')
|
||||||
|
else
|
||||||
|
# Update ssl config
|
||||||
|
JSON_STRING=$(jq -n \
|
||||||
|
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||||
|
--arg LOGSTASHKEY "$LOGSTASHKEY" \
|
||||||
|
--arg LOGSTASHCRT "$LOGSTASHCRT" \
|
||||||
|
--arg LOGSTASHCA "$LOGSTASHCA" \
|
||||||
|
'{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
# Update Logstash Outputs
|
# Update Logstash Outputs
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
||||||
}
|
}
|
||||||
function update_kafka_outputs() {
|
function update_kafka_outputs() {
|
||||||
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
|
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
|
||||||
SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')
|
if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
|
||||||
|
SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
|
||||||
JSON_STRING=$(jq -n \
|
if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
|
||||||
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
# Update policy when fleet has secrets enabled
|
||||||
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
JSON_STRING=$(jq -n \
|
||||||
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
|
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||||
# Update Kafka outputs
|
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
--argjson SECRETS "$SECRETS" \
|
||||||
|
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
|
||||||
|
else
|
||||||
|
# Update policy when fleet has secrets disabled or policy hasn't been force updated
|
||||||
|
JSON_STRING=$(jq -n \
|
||||||
|
--arg UPDATEDLIST "$NEW_LIST_JSON" \
|
||||||
|
--argjson SSL_CONFIG "$SSL_CONFIG" \
|
||||||
|
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
|
||||||
|
fi
|
||||||
|
# Update Kafka outputs
|
||||||
|
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
|
||||||
|
else
|
||||||
|
printf "Failed to get current Kafka output policy..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
{% if GLOBALS.pipeline == "KAFKA" %}
|
{% if GLOBALS.pipeline == "KAFKA" %}
|
||||||
@@ -124,7 +197,7 @@ NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "$
|
|||||||
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
|
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
|
||||||
|
|
||||||
# Compare the current & new list of outputs - if different, update the Logstash outputs
|
# Compare the current & new list of outputs - if different, update the Logstash outputs
|
||||||
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
|
if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
|
||||||
printf "\nHashes match - no update needed.\n"
|
printf "\nHashes match - no update needed.\n"
|
||||||
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
|
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
|
||||||
|
|
||||||
|
|||||||
@@ -10,8 +10,16 @@
|
|||||||
|
|
||||||
{%- for PACKAGE in SUPPORTED_PACKAGES %}
|
{%- for PACKAGE in SUPPORTED_PACKAGES %}
|
||||||
echo "Setting up {{ PACKAGE }} package..."
|
echo "Setting up {{ PACKAGE }} package..."
|
||||||
VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}")
|
if VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}"); then
|
||||||
elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
|
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then
|
||||||
|
# packages loaded by this script should never fail to install and REQUIRED before an installation of SO can be considered successful
|
||||||
|
echo -e "\nERROR: Failed to install default integration package -- $PACKAGE $VERSION"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
echo
|
echo
|
||||||
{%- endfor %}
|
{%- endfor %}
|
||||||
echo
|
echo
|
||||||
|
|||||||
@@ -10,8 +10,15 @@
|
|||||||
|
|
||||||
{%- for PACKAGE in SUPPORTED_PACKAGES %}
|
{%- for PACKAGE in SUPPORTED_PACKAGES %}
|
||||||
echo "Upgrading {{ PACKAGE }} package..."
|
echo "Upgrading {{ PACKAGE }} package..."
|
||||||
VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}")
|
if VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}"); then
|
||||||
elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
|
if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then
|
||||||
|
# exit 1 on failure to upgrade a default package, allow salt to handle retries
|
||||||
|
echo -e "\nERROR: Failed to upgrade $PACKAGE to version: $VERSION"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
|
||||||
|
fi
|
||||||
echo
|
echo
|
||||||
{%- endfor %}
|
{%- endfor %}
|
||||||
echo
|
echo
|
||||||
|
|||||||
@@ -23,18 +23,17 @@ if [[ "$RETURN_CODE" != "0" ]]; then
|
|||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
ALIASES=".fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
|
ALIASES=(.fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest)
|
||||||
for ALIAS in ${ALIASES}
|
for ALIAS in "${ALIASES[@]}"; do
|
||||||
do
|
|
||||||
# Get all concrete indices from alias
|
# Get all concrete indices from alias
|
||||||
INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')
|
if INDXS_RAW=$(curl -sK /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then
|
||||||
|
INDXS=$(echo "$INDXS_RAW" | jq -r '.aliases[].indices[]')
|
||||||
# Delete all resolved indices
|
# Delete all resolved indices
|
||||||
for INDX in ${INDXS}
|
for INDX in ${INDXS}; do
|
||||||
do
|
|
||||||
status "Deleting $INDX"
|
status "Deleting $INDX"
|
||||||
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
|
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
|
||||||
done
|
done
|
||||||
|
fi
|
||||||
done
|
done
|
||||||
|
|
||||||
# Restarting Kibana...
|
# Restarting Kibana...
|
||||||
@@ -51,22 +50,61 @@ if [[ "$RETURN_CODE" != "0" ]]; then
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
printf "\n### Create ES Token ###\n"
|
printf "\n### Create ES Token ###\n"
|
||||||
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
|
if ESTOKEN_RAW=$(fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
|
||||||
|
ESTOKEN=$(echo "$ESTOKEN_RAW" | jq -r .value)
|
||||||
|
else
|
||||||
|
echo -e "\nFailed to create ES token..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
### Create Outputs, Fleet Policy and Fleet URLs ###
|
### Create Outputs, Fleet Policy and Fleet URLs ###
|
||||||
# Create the Manager Elasticsearch Output first and set it as the default output
|
# Create the Manager Elasticsearch Output first and set it as the default output
|
||||||
printf "\nAdd Manager Elasticsearch Output...\n"
|
printf "\nAdd Manager Elasticsearch Output...\n"
|
||||||
ESCACRT=$(openssl x509 -in $INTCA)
|
ESCACRT=$(openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
|
||||||
JSON_STRING=$( jq -n \
|
JSON_STRING=$(jq -n \
|
||||||
--arg ESCACRT "$ESCACRT" \
|
--arg ESCACRT "$ESCACRT" \
|
||||||
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
|
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}')
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
|
||||||
|
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||||
|
echo -e "\nFailed to create so-elasticsearch_manager policy..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
printf "\n\n"
|
printf "\n\n"
|
||||||
|
|
||||||
|
# so-manager_elasticsearch should exist and be disabled. Now update it before checking its the only default policy
|
||||||
|
MANAGER_OUTPUT_ENABLED=$(echo "$JSON_STRING" | jq 'del(.id) | .is_default = true | .is_default_monitoring = true')
|
||||||
|
if ! curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$MANAGER_OUTPUT_ENABLED"; then
|
||||||
|
echo -e "\n failed to update so-manager_elasticsearch"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# At this point there should only be two policies. fleet-default-output & so-manager_elasticsearch
|
||||||
|
status "Verifying so-manager_elasticsearch policy is configured as the current default"
|
||||||
|
|
||||||
|
# Grab the fleet-default-output policy instead of so-manager_elasticsearch, because a weird state can exist where both fleet-default-output & so-elasticsearch_manager can be set as the active default output for logs / metrics. Resulting in logs not ingesting on import/eval nodes
|
||||||
|
if DEFAULTPOLICY=$(fleet_api "outputs/fleet-default-output"); then
|
||||||
|
fleet_default=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default')
|
||||||
|
fleet_default_monitoring=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default_monitoring')
|
||||||
|
# Check that fleet-default-output isn't configured as a default for anything ( both variables return false )
|
||||||
|
if [[ $fleet_default == "false" ]] && [[ $fleet_default_monitoring == "false" ]]; then
|
||||||
|
echo -e "\nso-manager_elasticsearch is configured as the current default policy..."
|
||||||
|
else
|
||||||
|
echo -e "\nVerification of so-manager_elasticsearch policy failed... The default 'fleet-default-output' output is still active..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
else
|
||||||
|
# fleet-output-policy is created automatically by fleet when started. Should always exist on any installation type
|
||||||
|
echo -e "\nDefault fleet-default-output policy doesn't exist...\n"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Create the Manager Fleet Server Host Agent Policy
|
# Create the Manager Fleet Server Host Agent Policy
|
||||||
# This has to be done while the Elasticsearch Output is set to the default Output
|
# This has to be done while the Elasticsearch Output is set to the default Output
|
||||||
printf "Create Manager Fleet Server Policy...\n"
|
printf "Create Manager Fleet Server Policy...\n"
|
||||||
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
|
if ! elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"; then
|
||||||
|
echo -e "\n Failed to create Manager fleet server policy..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Modify the default integration policy to update the policy_id with the correct naming
|
# Modify the default integration policy to update the policy_id with the correct naming
|
||||||
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
|
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
|
||||||
@@ -74,7 +112,10 @@ UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname
|
|||||||
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
|
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
|
||||||
|
|
||||||
# Add the Fleet Server Integration to the new Fleet Policy
|
# Add the Fleet Server Integration to the new Fleet Policy
|
||||||
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
|
if ! elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"; then
|
||||||
|
echo -e "\nFailed to create Fleet server integration for Manager.."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Now we can create the Logstash Output and set it to to be the default Output
|
# Now we can create the Logstash Output and set it to to be the default Output
|
||||||
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
|
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
|
||||||
@@ -86,9 +127,12 @@ JSON_STRING=$( jq -n \
|
|||||||
--arg LOGSTASHCRT "$LOGSTASHCRT" \
|
--arg LOGSTASHCRT "$LOGSTASHCRT" \
|
||||||
--arg LOGSTASHKEY "$LOGSTASHKEY" \
|
--arg LOGSTASHKEY "$LOGSTASHKEY" \
|
||||||
--arg LOGSTASHCA "$LOGSTASHCA" \
|
--arg LOGSTASHCA "$LOGSTASHCA" \
|
||||||
'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
|
'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}'
|
||||||
)
|
)
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||||
|
echo -e "\nFailed to create logstash fleet output"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
printf "\n\n"
|
printf "\n\n"
|
||||||
{%- endif %}
|
{%- endif %}
|
||||||
|
|
||||||
@@ -106,7 +150,10 @@ else
|
|||||||
fi
|
fi
|
||||||
|
|
||||||
## This array replaces whatever URLs are currently configured
|
## This array replaces whatever URLs are currently configured
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/fleet_server_hosts" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "fleet_server_hosts" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||||
|
echo -e "\nFailed to add manager fleet URL"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
printf "\n\n"
|
printf "\n\n"
|
||||||
|
|
||||||
### Create Policies & Associated Integration Configuration ###
|
### Create Policies & Associated Integration Configuration ###
|
||||||
@@ -117,13 +164,22 @@ printf "\n\n"
|
|||||||
/usr/sbin/so-elasticsearch-templates-load
|
/usr/sbin/so-elasticsearch-templates-load
|
||||||
|
|
||||||
# Initial Endpoints Policy
|
# Initial Endpoints Policy
|
||||||
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
|
if ! elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"; then
|
||||||
|
echo -e "\nFailed to create endpoints-initial policy..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Grid Nodes - General Policy
|
# Grid Nodes - General Policy
|
||||||
elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"
|
if ! elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"; then
|
||||||
|
echo -e "\nFailed to create so-grid-nodes_general policy..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Grid Nodes - Heavy Node Policy
|
# Grid Nodes - Heavy Node Policy
|
||||||
elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"
|
if ! elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"; then
|
||||||
|
echo -e "\nFailed to create so-grid-nodes_heavy policy..."
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Load Integrations for default policies
|
# Load Integrations for default policies
|
||||||
so-elastic-fleet-integration-policy-load
|
so-elastic-fleet-integration-policy-load
|
||||||
@@ -135,14 +191,34 @@ JSON_STRING=$( jq -n \
|
|||||||
'{"name":$NAME,"host":$URL,"is_default":true}'
|
'{"name":$NAME,"host":$URL,"is_default":true}'
|
||||||
)
|
)
|
||||||
|
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "agent_download_sources" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||||
|
echo -e "\nFailed to update Elastic Agent artifact URL"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
### Finalization ###
|
### Finalization ###
|
||||||
|
|
||||||
# Query for Enrollment Tokens for default policies
|
# Query for Enrollment Tokens for default policies
|
||||||
ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
|
if ENDPOINTSENROLLMENTOKEN_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
|
||||||
GRIDNODESENROLLMENTOKENGENERAL=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
|
ENDPOINTSENROLLMENTOKEN=$(echo "$ENDPOINTSENROLLMENTOKEN_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
|
||||||
GRIDNODESENROLLMENTOKENHEAVY=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
|
else
|
||||||
|
echo -e "\nFailed to query for Endpoints enrollment token"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if GRIDNODESENROLLMENTOKENGENERAL_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
|
||||||
|
GRIDNODESENROLLMENTOKENGENERAL=$(echo "$GRIDNODESENROLLMENTOKENGENERAL_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
|
||||||
|
else
|
||||||
|
echo -e "\nFailed to query for Grid nodes - General enrollment token"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if GRIDNODESENROLLMENTOKENHEAVY_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
|
||||||
|
GRIDNODESENROLLMENTOKENHEAVY=$(echo "$GRIDNODESENROLLMENTOKENHEAVY_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
|
||||||
|
else
|
||||||
|
echo -e "\nFailed to query for Grid nodes - Heavy enrollment token"
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
# Store needed data in minion pillar
|
# Store needed data in minion pillar
|
||||||
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
|
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
|
||||||
|
|||||||
@@ -5,46 +5,78 @@
|
|||||||
# Elastic License 2.0.
|
# Elastic License 2.0.
|
||||||
|
|
||||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||||
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
|
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-managerhype'] %}
|
||||||
|
|
||||||
. /usr/sbin/so-common
|
. /usr/sbin/so-common
|
||||||
|
|
||||||
|
force=false
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case $1 in
|
||||||
|
-f|--force)
|
||||||
|
force=true
|
||||||
|
shift
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Unknown option $1"
|
||||||
|
echo "Usage: $0 [-f|--force]"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
done
|
||||||
|
|
||||||
# Check to make sure that Kibana API is up & ready
|
# Check to make sure that Kibana API is up & ready
|
||||||
RETURN_CODE=0
|
RETURN_CODE=0
|
||||||
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
|
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
|
||||||
RETURN_CODE=$?
|
RETURN_CODE=$?
|
||||||
|
|
||||||
if [[ "$RETURN_CODE" != "0" ]]; then
|
if [[ "$RETURN_CODE" != "0" ]]; then
|
||||||
printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
|
echo -e "\nKibana API not accessible, can't setup Elastic Fleet output policy for Kafka...\n"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
|
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
|
||||||
|
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
|
||||||
|
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
|
||||||
|
KAFKA_OUTPUT_VERSION="2.6.0"
|
||||||
|
|
||||||
if ! echo "$output" | grep -q "so-manager_kafka"; then
|
if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
|
||||||
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
|
# Create a new output policy for Kafka. Default is disabled 'is_default: false & is_default_monitoring: false'
|
||||||
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
|
|
||||||
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
|
|
||||||
KAFKA_OUTPUT_VERSION="2.6.0"
|
|
||||||
JSON_STRING=$( jq -n \
|
JSON_STRING=$( jq -n \
|
||||||
--arg KAFKACRT "$KAFKACRT" \
|
--arg KAFKACRT "$KAFKACRT" \
|
||||||
--arg KAFKAKEY "$KAFKAKEY" \
|
--arg KAFKAKEY "$KAFKAKEY" \
|
||||||
--arg KAFKACA "$KAFKACA" \
|
--arg KAFKACA "$KAFKACA" \
|
||||||
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
|
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
|
||||||
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
|
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
|
||||||
'{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 10 }, "topics":[{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
|
'{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
|
||||||
)
|
)
|
||||||
curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
|
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
|
||||||
refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
|
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
|
||||||
|
exit 1
|
||||||
if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
|
else
|
||||||
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
|
echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
|
||||||
|
exit 0
|
||||||
|
fi
|
||||||
|
elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null) && [[ "$force" == "true" ]]; then
|
||||||
|
# force an update to Kafka policy. Keep the current value of Kafka output policy (enabled/disabled).
|
||||||
|
ENABLED_DISABLED=$(echo "$kafka_output" | jq -e .item.is_default)
|
||||||
|
HOSTS=$(echo "$kafka_output" | jq -r '.item.hosts')
|
||||||
|
JSON_STRING=$( jq -n \
|
||||||
|
--arg KAFKACRT "$KAFKACRT" \
|
||||||
|
--arg KAFKAKEY "$KAFKAKEY" \
|
||||||
|
--arg KAFKACA "$KAFKACA" \
|
||||||
|
--arg ENABLED_DISABLED "$ENABLED_DISABLED"\
|
||||||
|
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
|
||||||
|
--argjson HOSTS "$HOSTS" \
|
||||||
|
'{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topics":[{"topic":"default-securityonion"}],"headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
|
||||||
|
)
|
||||||
|
if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
|
||||||
|
echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
|
||||||
exit 1
|
exit 1
|
||||||
elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
|
else
|
||||||
echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
|
echo -e "\nForced update to Elastic Fleet output policy for Kafka...\n"
|
||||||
fi
|
fi
|
||||||
|
|
||||||
elif echo "$output" | grep -q "so-manager_kafka"; then
|
else
|
||||||
echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
|
echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
|
||||||
fi
|
fi
|
||||||
{% else %}
|
{% else %}
|
||||||
|
|||||||
@@ -28,7 +28,7 @@
|
|||||||
{% endfor %}
|
{% endfor %}
|
||||||
{% endfor %}
|
{% endfor %}
|
||||||
|
|
||||||
{% if grains.id.split('_') | last in ['manager','managersearch','standalone'] %}
|
{% if grains.id.split('_') | last in ['manager','managerhype','managersearch','standalone'] %}
|
||||||
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
|
{% if ELASTICSEARCH_SEED_HOSTS | length > 1 %}
|
||||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
|
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': []}}) %}
|
||||||
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
|
{% for NODE in ELASTICSEARCH_SEED_HOSTS %}
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
elasticsearch:
|
elasticsearch:
|
||||||
enabled: false
|
enabled: false
|
||||||
version: 8.17.3
|
version: 8.18.8
|
||||||
index_clean: true
|
index_clean: true
|
||||||
config:
|
config:
|
||||||
action:
|
action:
|
||||||
@@ -284,6 +284,86 @@ elasticsearch:
|
|||||||
hot:
|
hot:
|
||||||
actions: {}
|
actions: {}
|
||||||
min_age: 0ms
|
min_age: 0ms
|
||||||
|
so-assistant-chat:
|
||||||
|
index_sorting: false
|
||||||
|
index_template:
|
||||||
|
composed_of:
|
||||||
|
- assistant-chat-mappings
|
||||||
|
- assistant-chat-settings
|
||||||
|
data_stream:
|
||||||
|
allow_custom_routing: false
|
||||||
|
hidden: false
|
||||||
|
ignore_missing_component_templates: []
|
||||||
|
index_patterns:
|
||||||
|
- so-assistant-chat*
|
||||||
|
priority: 501
|
||||||
|
template:
|
||||||
|
mappings:
|
||||||
|
date_detection: false
|
||||||
|
dynamic_templates:
|
||||||
|
- strings_as_keyword:
|
||||||
|
mapping:
|
||||||
|
ignore_above: 1024
|
||||||
|
type: keyword
|
||||||
|
match_mapping_type: string
|
||||||
|
settings:
|
||||||
|
index:
|
||||||
|
lifecycle:
|
||||||
|
name: so-assistant-chat-logs
|
||||||
|
mapping:
|
||||||
|
total_fields:
|
||||||
|
limit: 1500
|
||||||
|
number_of_replicas: 0
|
||||||
|
number_of_shards: 1
|
||||||
|
refresh_interval: 1s
|
||||||
|
sort:
|
||||||
|
field: '@timestamp'
|
||||||
|
order: desc
|
||||||
|
policy:
|
||||||
|
phases:
|
||||||
|
hot:
|
||||||
|
actions: {}
|
||||||
|
min_age: 0ms
|
||||||
|
so-assistant-session:
|
||||||
|
index_sorting: false
|
||||||
|
index_template:
|
||||||
|
composed_of:
|
||||||
|
- assistant-session-mappings
|
||||||
|
- assistant-session-settings
|
||||||
|
data_stream:
|
||||||
|
allow_custom_routing: false
|
||||||
|
hidden: false
|
||||||
|
ignore_missing_component_templates: []
|
||||||
|
index_patterns:
|
||||||
|
- so-assistant-session*
|
||||||
|
priority: 501
|
||||||
|
template:
|
||||||
|
mappings:
|
||||||
|
date_detection: false
|
||||||
|
dynamic_templates:
|
||||||
|
- strings_as_keyword:
|
||||||
|
mapping:
|
||||||
|
ignore_above: 1024
|
||||||
|
type: keyword
|
||||||
|
match_mapping_type: string
|
||||||
|
settings:
|
||||||
|
index:
|
||||||
|
lifecycle:
|
||||||
|
name: so-assistant-session-logs
|
||||||
|
mapping:
|
||||||
|
total_fields:
|
||||||
|
limit: 1500
|
||||||
|
number_of_replicas: 0
|
||||||
|
number_of_shards: 1
|
||||||
|
refresh_interval: 1s
|
||||||
|
sort:
|
||||||
|
field: '@timestamp'
|
||||||
|
order: desc
|
||||||
|
policy:
|
||||||
|
phases:
|
||||||
|
hot:
|
||||||
|
actions: {}
|
||||||
|
min_age: 0ms
|
||||||
so-endgame:
|
so-endgame:
|
||||||
index_sorting: false
|
index_sorting: false
|
||||||
index_template:
|
index_template:
|
||||||
@@ -567,6 +647,7 @@ elasticsearch:
|
|||||||
- common-settings
|
- common-settings
|
||||||
- common-dynamic-mappings
|
- common-dynamic-mappings
|
||||||
- winlog-mappings
|
- winlog-mappings
|
||||||
|
- hash-mappings
|
||||||
data_stream: {}
|
data_stream: {}
|
||||||
ignore_missing_component_templates: []
|
ignore_missing_component_templates: []
|
||||||
index_patterns:
|
index_patterns:
|
||||||
@@ -1242,6 +1323,68 @@ elasticsearch:
|
|||||||
set_priority:
|
set_priority:
|
||||||
priority: 50
|
priority: 50
|
||||||
min_age: 30d
|
min_age: 30d
|
||||||
|
so-elastic-agent-monitor:
|
||||||
|
index_sorting: false
|
||||||
|
index_template:
|
||||||
|
composed_of:
|
||||||
|
- event-mappings
|
||||||
|
- so-elastic-agent-monitor
|
||||||
|
- so-fleet_integrations.ip_mappings-1
|
||||||
|
- so-fleet_globals-1
|
||||||
|
- so-fleet_agent_id_verification-1
|
||||||
|
data_stream:
|
||||||
|
allow_custom_routing: false
|
||||||
|
hidden: false
|
||||||
|
index_patterns:
|
||||||
|
- logs-agentmonitor-*
|
||||||
|
priority: 501
|
||||||
|
template:
|
||||||
|
mappings:
|
||||||
|
_meta:
|
||||||
|
managed: true
|
||||||
|
managed_by: security_onion
|
||||||
|
package:
|
||||||
|
name: elastic_agent
|
||||||
|
settings:
|
||||||
|
index:
|
||||||
|
lifecycle:
|
||||||
|
name: so-elastic-agent-monitor-logs
|
||||||
|
mapping:
|
||||||
|
total_fields:
|
||||||
|
limit: 5000
|
||||||
|
number_of_replicas: 0
|
||||||
|
sort:
|
||||||
|
field: '@timestamp'
|
||||||
|
order: desc
|
||||||
|
policy:
|
||||||
|
_meta:
|
||||||
|
managed: true
|
||||||
|
managed_by: security_onion
|
||||||
|
package:
|
||||||
|
name: elastic_agent
|
||||||
|
phases:
|
||||||
|
cold:
|
||||||
|
actions:
|
||||||
|
set_priority:
|
||||||
|
priority: 0
|
||||||
|
min_age: 60d
|
||||||
|
delete:
|
||||||
|
actions:
|
||||||
|
delete: {}
|
||||||
|
min_age: 365d
|
||||||
|
hot:
|
||||||
|
actions:
|
||||||
|
rollover:
|
||||||
|
max_age: 30d
|
||||||
|
max_primary_shard_size: 50gb
|
||||||
|
set_priority:
|
||||||
|
priority: 100
|
||||||
|
min_age: 0ms
|
||||||
|
warm:
|
||||||
|
actions:
|
||||||
|
set_priority:
|
||||||
|
priority: 50
|
||||||
|
min_age: 30d
|
||||||
so-logs-elastic_agent_x_apm_server:
|
so-logs-elastic_agent_x_apm_server:
|
||||||
index_sorting: false
|
index_sorting: false
|
||||||
index_template:
|
index_template:
|
||||||
@@ -1848,6 +1991,70 @@ elasticsearch:
|
|||||||
set_priority:
|
set_priority:
|
||||||
priority: 50
|
priority: 50
|
||||||
min_age: 30d
|
min_age: 30d
|
||||||
|
so-logs-elasticsearch_x_server:
|
||||||
|
index_sorting: false
|
||||||
|
index_template:
|
||||||
|
composed_of:
|
||||||
|
- logs-elasticsearch.server@package
|
||||||
|
- logs-elasticsearch.server@custom
|
||||||
|
- so-fleet_integrations.ip_mappings-1
|
||||||
|
- so-fleet_globals-1
|
||||||
|
- so-fleet_agent_id_verification-1
|
||||||
|
data_stream:
|
||||||
|
allow_custom_routing: false
|
||||||
|
hidden: false
|
||||||
|
ignore_missing_component_templates:
|
||||||
|
- logs-elasticsearch.server@custom
|
||||||
|
index_patterns:
|
||||||
|
- logs-elasticsearch.server-*
|
||||||
|
priority: 501
|
||||||
|
template:
|
||||||
|
mappings:
|
||||||
|
_meta:
|
||||||
|
managed: true
|
||||||
|
managed_by: security_onion
|
||||||
|
package:
|
||||||
|
name: elastic_agent
|
||||||
|
settings:
|
||||||
|
index:
|
||||||
|
lifecycle:
|
||||||
|
name: so-logs-elasticsearch.server-logs
|
||||||
|
mapping:
|
||||||
|
total_fields:
|
||||||
|
limit: 5000
|
||||||
|
number_of_replicas: 0
|
||||||
|
sort:
|
||||||
|
field: '@timestamp'
|
||||||
|
order: desc
|
||||||
|
policy:
|
||||||
|
_meta:
|
||||||
|
managed: true
|
||||||
|
managed_by: security_onion
|
||||||
|
package:
|
||||||
|
name: elastic_agent
|
||||||
|
phases:
|
||||||
|
cold:
|
||||||
|
actions:
|
||||||
|
set_priority:
|
||||||
|
priority: 0
|
||||||
|
min_age: 60d
|
||||||
|
delete:
|
||||||
|
actions:
|
||||||
|
delete: {}
|
||||||
|
min_age: 365d
|
||||||
|
hot:
|
||||||
|
actions:
|
||||||
|
rollover:
|
||||||
|
max_age: 30d
|
||||||
|
max_primary_shard_size: 50gb
|
||||||
|
set_priority:
|
||||||
|
priority: 100
|
||||||
|
min_age: 0ms
|
||||||
|
warm:
|
||||||
|
actions:
|
||||||
|
set_priority:
|
||||||
|
priority: 50
|
||||||
|
min_age: 30d
|
||||||
so-logs-endpoint_x_actions:
|
so-logs-endpoint_x_actions:
|
||||||
index_sorting: false
|
index_sorting: false
|
||||||
index_template:
|
index_template:
|
||||||
@@ -3874,6 +4081,7 @@ elasticsearch:
|
|||||||
- vulnerability-mappings
|
- vulnerability-mappings
|
||||||
- common-settings
|
- common-settings
|
||||||
- common-dynamic-mappings
|
- common-dynamic-mappings
|
||||||
|
- hash-mappings
|
||||||
data_stream: {}
|
data_stream: {}
|
||||||
ignore_missing_component_templates: []
|
ignore_missing_component_templates: []
|
||||||
index_patterns:
|
index_patterns:
|
||||||
@@ -3987,6 +4195,7 @@ elasticsearch:
|
|||||||
- vulnerability-mappings
|
- vulnerability-mappings
|
||||||
- common-settings
|
- common-settings
|
||||||
- common-dynamic-mappings
|
- common-dynamic-mappings
|
||||||
|
- hash-mappings
|
||||||
data_stream: {}
|
data_stream: {}
|
||||||
ignore_missing_component_templates: []
|
ignore_missing_component_templates: []
|
||||||
index_patterns:
|
index_patterns:
|
||||||
@@ -4028,7 +4237,7 @@ elasticsearch:
|
|||||||
hot:
|
hot:
|
||||||
actions:
|
actions:
|
||||||
rollover:
|
rollover:
|
||||||
max_age: 1d
|
max_age: 30d
|
||||||
max_primary_shard_size: 50gb
|
max_primary_shard_size: 50gb
|
||||||
set_priority:
|
set_priority:
|
||||||
priority: 100
|
priority: 100
|
||||||
@@ -4100,6 +4309,7 @@ elasticsearch:
|
|||||||
- vulnerability-mappings
|
- vulnerability-mappings
|
||||||
- common-settings
|
- common-settings
|
||||||
- common-dynamic-mappings
|
- common-dynamic-mappings
|
||||||
|
- hash-mappings
|
||||||
data_stream: {}
|
data_stream: {}
|
||||||
ignore_missing_component_templates: []
|
ignore_missing_component_templates: []
|
||||||
index_patterns:
|
index_patterns:
|
||||||
@@ -4329,6 +4539,7 @@ elasticsearch:
|
|||||||
- zeek-mappings
|
- zeek-mappings
|
||||||
- common-settings
|
- common-settings
|
||||||
- common-dynamic-mappings
|
- common-dynamic-mappings
|
||||||
|
- hash-mappings
|
||||||
data_stream: {}
|
data_stream: {}
|
||||||
ignore_missing_component_templates: []
|
ignore_missing_component_templates: []
|
||||||
index_patterns:
|
index_patterns:
|
||||||
@@ -4501,6 +4712,14 @@ elasticsearch:
|
|||||||
- data
|
- data
|
||||||
- remote_cluster_client
|
- remote_cluster_client
|
||||||
- transform
|
- transform
|
||||||
|
so-managerhype:
|
||||||
|
config:
|
||||||
|
node:
|
||||||
|
roles:
|
||||||
|
- master
|
||||||
|
- data
|
||||||
|
- remote_cluster_client
|
||||||
|
- transform
|
||||||
so-managersearch:
|
so-managersearch:
|
||||||
config:
|
config:
|
||||||
node:
|
node:
|
||||||
|
|||||||
@@ -204,7 +204,7 @@ so-elasticsearch-roles-load:
|
|||||||
- docker_container: so-elasticsearch
|
- docker_container: so-elasticsearch
|
||||||
- file: elasticsearch_sbin_jinja
|
- file: elasticsearch_sbin_jinja
|
||||||
|
|
||||||
{% if grains.role in ['so-managersearch', 'so-manager'] %}
|
{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
|
||||||
{% set ap = "absent" %}
|
{% set ap = "absent" %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
|
{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
|
||||||
|
|||||||
@@ -26,7 +26,7 @@
|
|||||||
{
|
{
|
||||||
"geoip": {
|
"geoip": {
|
||||||
"field": "destination.ip",
|
"field": "destination.ip",
|
||||||
"target_field": "destination_geo",
|
"target_field": "destination.as",
|
||||||
"database_file": "GeoLite2-ASN.mmdb",
|
"database_file": "GeoLite2-ASN.mmdb",
|
||||||
"ignore_missing": true,
|
"ignore_missing": true,
|
||||||
"ignore_failure": true,
|
"ignore_failure": true,
|
||||||
@@ -36,13 +36,17 @@
|
|||||||
{
|
{
|
||||||
"geoip": {
|
"geoip": {
|
||||||
"field": "source.ip",
|
"field": "source.ip",
|
||||||
"target_field": "source_geo",
|
"target_field": "source.as",
|
||||||
"database_file": "GeoLite2-ASN.mmdb",
|
"database_file": "GeoLite2-ASN.mmdb",
|
||||||
"ignore_missing": true,
|
"ignore_missing": true,
|
||||||
"ignore_failure": true,
|
"ignore_failure": true,
|
||||||
"properties": ["ip", "asn", "organization_name", "network"]
|
"properties": ["ip", "asn", "organization_name", "network"]
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
|
{ "rename": { "field": "destination.as.organization_name", "target_field": "destination.as.organization.name", "ignore_failure": true, "ignore_missing": true } },
|
||||||
|
{ "rename": { "field": "source.as.organization_name", "target_field": "source.as.organization.name", "ignore_failure": true, "ignore_missing": true } },
|
||||||
|
{ "rename": { "field": "destination.as.asn", "target_field": "destination.as.number", "ignore_failure": true, "ignore_missing": true } },
|
||||||
|
{ "rename": { "field": "source.as.asn", "target_field": "source.as.number", "ignore_failure": true, "ignore_missing": true } },
|
||||||
{ "set": { "if": "ctx.event?.severity == 1", "field": "event.severity_label", "value": "low", "override": true } },
|
{ "set": { "if": "ctx.event?.severity == 1", "field": "event.severity_label", "value": "low", "override": true } },
|
||||||
{ "set": { "if": "ctx.event?.severity == 2", "field": "event.severity_label", "value": "medium", "override": true } },
|
{ "set": { "if": "ctx.event?.severity == 2", "field": "event.severity_label", "value": "medium", "override": true } },
|
||||||
{ "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } },
|
{ "set": { "if": "ctx.event?.severity == 3", "field": "event.severity_label", "value": "high", "override": true } },
|
||||||
|
|||||||
22
salt/elasticsearch/files/ingest/common.ip_validation
Normal file
22
salt/elasticsearch/files/ingest/common.ip_validation
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
{
|
||||||
|
"processors": [
|
||||||
|
{
|
||||||
|
"convert": {
|
||||||
|
"field": "_ingest._value",
|
||||||
|
"type": "ip",
|
||||||
|
"target_field": "_ingest._temp_ip",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"append": {
|
||||||
|
"field": "temp._valid_ips",
|
||||||
|
"allow_duplicates": false,
|
||||||
|
"value": [
|
||||||
|
"{{{_ingest._temp_ip}}}"
|
||||||
|
],
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
36
salt/elasticsearch/files/ingest/elasticagent.monitor
Normal file
36
salt/elasticsearch/files/ingest/elasticagent.monitor
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
{
|
||||||
|
"processors": [
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"field": "event.dataset",
|
||||||
|
"value": "gridmetrics.agents",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"field": "event.module",
|
||||||
|
"value": "gridmetrics",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"remove": {
|
||||||
|
"field": [
|
||||||
|
"host",
|
||||||
|
"elastic_agent",
|
||||||
|
"agent"
|
||||||
|
],
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"json": {
|
||||||
|
"field": "message",
|
||||||
|
"add_to_root": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -19,11 +19,13 @@
|
|||||||
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
|
{ "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } },
|
||||||
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.dataset", "value": "import" } },
|
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.dataset", "value": "import" } },
|
||||||
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.namespace", "value": "so" } },
|
{ "set": { "if": "ctx.tags != null && ctx.tags.contains('import')", "override": true, "field": "data_stream.namespace", "value": "so" } },
|
||||||
{ "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp","ignore_failure": true, "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSX","yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } },
|
|
||||||
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
|
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
|
||||||
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
|
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
|
||||||
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
|
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
|
||||||
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
|
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
|
||||||
|
{ "set": { "if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'", "field": "event.module", "value":"elasticsearch" }},
|
||||||
|
{"append": {"field":"related.ip","value":["{{source.ip}}","{{destination.ip}}"],"allow_duplicates":false,"if":"ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null","ignore_failure":true}},
|
||||||
|
{"foreach": {"field":"host.ip","processor":{"append":{"field":"related.ip","value":"{{_ingest._value}}","allow_duplicates":false}},"if":"ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null","ignore_missing":true, "description":"Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip"}},
|
||||||
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
|
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,11 +0,0 @@
|
|||||||
{
|
|
||||||
"description" : "import.wel",
|
|
||||||
"processors" : [
|
|
||||||
{ "set": { "field": "event.ingested", "value": "{{ @timestamp }}" } },
|
|
||||||
{ "set" : { "field" : "@timestamp", "value" : "{{ event.created }}" } },
|
|
||||||
{ "remove": { "field": [ "event_record_id", "event.created" , "timestamp" , "winlog.event_data.UtcTime" ], "ignore_failure": true } },
|
|
||||||
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
|
|
||||||
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
|
|
||||||
{ "pipeline": { "name": "common" } }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -107,61 +107,61 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-firewall",
|
"name": "logs-pfsense.log-1.23.1-firewall",
|
||||||
"if": "ctx.event.provider == 'filterlog'"
|
"if": "ctx.event.provider == 'filterlog'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-openvpn",
|
"name": "logs-pfsense.log-1.23.1-openvpn",
|
||||||
"if": "ctx.event.provider == 'openvpn'"
|
"if": "ctx.event.provider == 'openvpn'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-ipsec",
|
"name": "logs-pfsense.log-1.23.1-ipsec",
|
||||||
"if": "ctx.event.provider == 'charon'"
|
"if": "ctx.event.provider == 'charon'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-dhcp",
|
"name": "logs-pfsense.log-1.23.1-dhcp",
|
||||||
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
|
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-unbound",
|
"name": "logs-pfsense.log-1.23.1-unbound",
|
||||||
"if": "ctx.event.provider == 'unbound'"
|
"if": "ctx.event.provider == 'unbound'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-haproxy",
|
"name": "logs-pfsense.log-1.23.1-haproxy",
|
||||||
"if": "ctx.event.provider == 'haproxy'"
|
"if": "ctx.event.provider == 'haproxy'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-php-fpm",
|
"name": "logs-pfsense.log-1.23.1-php-fpm",
|
||||||
"if": "ctx.event.provider == 'php-fpm'"
|
"if": "ctx.event.provider == 'php-fpm'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-squid",
|
"name": "logs-pfsense.log-1.23.1-squid",
|
||||||
"if": "ctx.event.provider == 'squid'"
|
"if": "ctx.event.provider == 'squid'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-snort",
|
"name": "logs-pfsense.log-1.23.1-snort",
|
||||||
"if": "ctx.event.provider == 'snort'"
|
"if": "ctx.event.provider == 'snort'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "logs-pfsense.log-1.21.0-suricata",
|
"name": "logs-pfsense.log-1.23.1-suricata",
|
||||||
"if": "ctx.event.provider == 'suricata'"
|
"if": "ctx.event.provider == 'suricata'"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
@@ -358,14 +358,6 @@
|
|||||||
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
|
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"remove": {
|
|
||||||
"field": "event.original",
|
|
||||||
"if": "ctx.tags == null || !(ctx.tags.contains('preserve_original_event'))",
|
|
||||||
"ignore_failure": true,
|
|
||||||
"ignore_missing": true
|
|
||||||
}
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"pipeline": {
|
"pipeline": {
|
||||||
"name": "global@custom",
|
"name": "global@custom",
|
||||||
@@ -1,14 +1,79 @@
|
|||||||
{
|
{
|
||||||
"description" : "suricata.alert",
|
"description": "suricata.alert",
|
||||||
"processors" : [
|
"processors": [
|
||||||
{ "set": { "if": "ctx.event?.imported != true", "field": "_index", "value": "logs-suricata.alerts-so" } },
|
{
|
||||||
{ "set": { "field": "tags","value": "alert" }},
|
"set": {
|
||||||
{ "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
|
"if": "ctx.event?.imported != true",
|
||||||
{ "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
|
"field": "_index",
|
||||||
{ "rename":{ "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
|
"value": "logs-suricata.alerts-so"
|
||||||
{ "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
|
}
|
||||||
{ "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
|
},
|
||||||
{ "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
|
{
|
||||||
{ "pipeline": { "name": "common.nids" } }
|
"set": {
|
||||||
]
|
"field": "tags",
|
||||||
}
|
"value": "alert"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.alert",
|
||||||
|
"target_field": "rule",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "rule.signature",
|
||||||
|
"target_field": "rule.name",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "rule.ref",
|
||||||
|
"target_field": "rule.version",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "rule.signature_id",
|
||||||
|
"target_field": "rule.uuid",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "rule.signature_id",
|
||||||
|
"target_field": "rule.signature",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.payload_printable",
|
||||||
|
"target_field": "network.data.decoded",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"dissect": {
|
||||||
|
"field": "rule.rule",
|
||||||
|
"pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"name": "common.nids"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -1,23 +1,155 @@
|
|||||||
{
|
{
|
||||||
"description" : "suricata.common",
|
"description": "suricata.common",
|
||||||
"processors" : [
|
"processors": [
|
||||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
{
|
||||||
{ "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
|
"json": {
|
||||||
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
|
"field": "message",
|
||||||
{ "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
|
"target_field": "message2",
|
||||||
{ "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
|
"ignore_failure": true
|
||||||
{ "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
|
}
|
||||||
{ "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
|
},
|
||||||
{ "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
|
{
|
||||||
{ "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
|
"rename": {
|
||||||
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
|
"field": "message2.pkt_src",
|
||||||
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
|
"target_field": "network.packet_source",
|
||||||
{ "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
|
"ignore_failure": true
|
||||||
{ "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
|
}
|
||||||
{ "set": { "field": "observer.name", "value": "{{agent.name}}" } },
|
},
|
||||||
{ "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
|
{
|
||||||
{ "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
|
"rename": {
|
||||||
{ "remove":{ "field": "agent", "ignore_failure": true } },
|
"field": "message2.proto",
|
||||||
{ "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } }
|
"target_field": "network.transport",
|
||||||
]
|
"ignore_failure": true
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.in_iface",
|
||||||
|
"target_field": "observer.ingress.interface.name",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.flow_id",
|
||||||
|
"target_field": "log.id.uid",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.src_ip",
|
||||||
|
"target_field": "source.ip",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.src_port",
|
||||||
|
"target_field": "source.port",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dest_ip",
|
||||||
|
"target_field": "destination.ip",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dest_port",
|
||||||
|
"target_field": "destination.port",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.vlan",
|
||||||
|
"target_field": "network.vlan.id",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.community_id",
|
||||||
|
"target_field": "network.community_id",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.xff",
|
||||||
|
"target_field": "xff.ip",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"field": "event.dataset",
|
||||||
|
"value": "{{ message2.event_type }}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"field": "observer.name",
|
||||||
|
"value": "{{agent.name}}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"field": "event.ingested",
|
||||||
|
"value": "{{@timestamp}}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"date": {
|
||||||
|
"field": "message2.timestamp",
|
||||||
|
"target_field": "@timestamp",
|
||||||
|
"formats": [
|
||||||
|
"ISO8601",
|
||||||
|
"UNIX"
|
||||||
|
],
|
||||||
|
"timezone": "UTC",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"remove": {
|
||||||
|
"field": "agent",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"append": {
|
||||||
|
"field": "related.ip",
|
||||||
|
"value": [
|
||||||
|
"{{source.ip}}",
|
||||||
|
"{{destination.ip}}"
|
||||||
|
],
|
||||||
|
"allow_duplicates": false,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
|
||||||
|
"ignore_failure": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.capture_file",
|
||||||
|
"target_field": "suricata.capture_file",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"if": "ctx?.event?.dataset != null",
|
||||||
|
"name": "suricata.{{event.dataset}}"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -1,21 +1,136 @@
|
|||||||
{
|
{
|
||||||
"description" : "suricata.dns",
|
"description": "suricata.dns",
|
||||||
"processors" : [
|
"processors": [
|
||||||
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
|
{
|
||||||
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
|
"rename": {
|
||||||
{ "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
|
"field": "message2.proto",
|
||||||
{ "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
|
"target_field": "network.transport",
|
||||||
{ "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
|
"ignore_missing": true
|
||||||
{ "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
|
}
|
||||||
{ "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
|
},
|
||||||
{ "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
|
{
|
||||||
{ "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
|
"rename": {
|
||||||
{ "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
|
"field": "message2.app_proto",
|
||||||
{ "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
|
"target_field": "network.protocol",
|
||||||
{ "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
|
"ignore_missing": true
|
||||||
{ "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
|
}
|
||||||
{ "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
|
},
|
||||||
{ "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
|
{
|
||||||
{ "pipeline": { "name": "common" } }
|
"rename": {
|
||||||
]
|
"field": "message2.dns.type",
|
||||||
}
|
"target_field": "dns.query.type",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.tx_id",
|
||||||
|
"target_field": "dns.tx_id",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.id",
|
||||||
|
"target_field": "dns.id",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.version",
|
||||||
|
"target_field": "dns.version",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"name": "suricata.dnsv3",
|
||||||
|
"ignore_missing_pipeline": true,
|
||||||
|
"if": "ctx?.dns?.version != null && ctx?.dns?.version == 3",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.rrname",
|
||||||
|
"target_field": "dns.query.name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.rrtype",
|
||||||
|
"target_field": "dns.query.type_name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.flags",
|
||||||
|
"target_field": "dns.flags",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.qr",
|
||||||
|
"target_field": "dns.qr",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.rd",
|
||||||
|
"target_field": "dns.recursion.desired",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.ra",
|
||||||
|
"target_field": "dns.recursion.available",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.opcode",
|
||||||
|
"target_field": "dns.opcode",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.rcode",
|
||||||
|
"target_field": "dns.response.code_name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.grouped.A",
|
||||||
|
"target_field": "dns.answers.data",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.grouped.CNAME",
|
||||||
|
"target_field": "dns.answers.name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')",
|
||||||
|
"name": "dns.tld"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"name": "common"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
56
salt/elasticsearch/files/ingest/suricata.dnsv3
Normal file
56
salt/elasticsearch/files/ingest/suricata.dnsv3
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
{
|
||||||
|
"processors": [
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.dns.queries",
|
||||||
|
"target_field": "dns.queries",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.name = ctx?.dns?.queries[0].rrname;\n}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.type_name = ctx?.dns?.queries[0].rrtype;\n}"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"foreach": {
|
||||||
|
"field": "dns.queries",
|
||||||
|
"processor": {
|
||||||
|
"rename": {
|
||||||
|
"field": "_ingest._value.rrname",
|
||||||
|
"target_field": "_ingest._value.name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"foreach": {
|
||||||
|
"field": "dns.queries",
|
||||||
|
"processor": {
|
||||||
|
"rename": {
|
||||||
|
"field": "_ingest._value.rrtype",
|
||||||
|
"target_field": "_ingest._value.type_name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"name": "suricata.tld",
|
||||||
|
"ignore_missing_pipeline": true,
|
||||||
|
"if": "ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
52
salt/elasticsearch/files/ingest/suricata.tld
Normal file
52
salt/elasticsearch/files/ingest/suricata.tld
Normal file
@@ -0,0 +1,52 @@
|
|||||||
|
{
|
||||||
|
"processors": [
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.top_level_domain = q.name.substring(q.name.lastIndexOf('.') + 1);\n }\n }\n}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.query_without_tld = q.name.substring(0, q.name.lastIndexOf('.'));\n }\n }\n}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.parent_domain = q.query_without_tld.substring(q.query_without_tld.lastIndexOf('.') + 1);\n }\n }\n}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.subdomain = q.query_without_tld.substring(0, q.query_without_tld.lastIndexOf('.'));\n }\n }\n}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null && q.top_level_domain != null) {\n q.highest_registered_domain = q.parent_domain + \".\" + q.top_level_domain;\n }\n }\n}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.subdomain != null) {\n q.subdomain_length = q.subdomain.length();\n }\n }\n}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null) {\n q.parent_domain_length = q.parent_domain.length();\n }\n }\n}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n q.remove('query_without_tld');\n }\n}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
61
salt/elasticsearch/files/ingest/zeek.analyzer
Normal file
61
salt/elasticsearch/files/ingest/zeek.analyzer
Normal file
@@ -0,0 +1,61 @@
|
|||||||
|
{
|
||||||
|
"description": "zeek.analyzer",
|
||||||
|
"processors": [
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"field": "event.dataset",
|
||||||
|
"value": "analyzer"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"remove": {
|
||||||
|
"field": [
|
||||||
|
"host"
|
||||||
|
],
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"json": {
|
||||||
|
"field": "message",
|
||||||
|
"target_field": "message2",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"field": "network.protocol",
|
||||||
|
"copy_from": "message2.analyzer_name",
|
||||||
|
"ignore_empty_value": true,
|
||||||
|
"if": "ctx?.message2?.analyzer_kind == 'protocol'"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"field": "network.protocol",
|
||||||
|
"ignore_empty_value": true,
|
||||||
|
"if": "ctx?.message2?.analyzer_kind != 'protocol'",
|
||||||
|
"copy_from": "message2.proto"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"lowercase": {
|
||||||
|
"field": "network.protocol",
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.failure_reason",
|
||||||
|
"target_field": "error.reason",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"name": "zeek.common"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -12,7 +12,8 @@
|
|||||||
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
|
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
|
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
|
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
|
||||||
{ "community_id": {} },
|
{ "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
|
||||||
|
{ "community_id": { "if": "ctx.network?.community_id == null" } },
|
||||||
{ "set": { "if": "ctx.source?.ip != null", "field": "client.ip", "value": "{{source.ip}}" } },
|
{ "set": { "if": "ctx.source?.ip != null", "field": "client.ip", "value": "{{source.ip}}" } },
|
||||||
{ "set": { "if": "ctx.source?.port != null", "field": "client.port", "value": "{{source.port}}" } },
|
{ "set": { "if": "ctx.source?.port != null", "field": "client.port", "value": "{{source.port}}" } },
|
||||||
{ "set": { "if": "ctx.destination?.ip != null", "field": "server.ip", "value": "{{destination.ip}}" } },
|
{ "set": { "if": "ctx.destination?.ip != null", "field": "server.ip", "value": "{{destination.ip}}" } },
|
||||||
|
|||||||
@@ -24,6 +24,10 @@
|
|||||||
{ "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } },
|
{ "rename": { "field": "message2.resp_cc", "target_field": "server.country_code", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.sensorname", "target_field": "observer.name", "ignore_missing": true } },
|
{ "rename": { "field": "message2.sensorname", "target_field": "observer.name", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_missing": true } },
|
{ "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_missing": true } },
|
||||||
|
{ "rename": { "field": "message2.ja4l", "target_field": "hash.ja4l", "ignore_missing" : true, "if": "ctx.message2?.ja4l != null && ctx.message2.ja4l.length() > 0" }},
|
||||||
|
{ "rename": { "field": "message2.ja4ls", "target_field": "hash.ja4ls", "ignore_missing" : true, "if": "ctx.message2?.ja4ls != null && ctx.message2.ja4ls.length() > 0" }},
|
||||||
|
{ "rename": { "field": "message2.ja4t", "target_field": "hash.ja4t", "ignore_missing" : true, "if": "ctx.message2?.ja4t != null && ctx.message2.ja4t.length() > 0" }},
|
||||||
|
{ "rename": { "field": "message2.ja4ts", "target_field": "hash.ja4ts", "ignore_missing" : true, "if": "ctx.message2?.ja4ts != null && ctx.message2.ja4ts.length() > 0" }},
|
||||||
{ "script": { "lang": "painless", "source": "ctx.network.bytes = (ctx.client.bytes + ctx.server.bytes)", "ignore_failure": true } },
|
{ "script": { "lang": "painless", "source": "ctx.network.bytes = (ctx.client.bytes + ctx.server.bytes)", "ignore_failure": true } },
|
||||||
{ "set": { "if": "ctx.connection?.state == 'S0'", "field": "connection.state_description", "value": "Connection attempt seen, no reply" } },
|
{ "set": { "if": "ctx.connection?.state == 'S0'", "field": "connection.state_description", "value": "Connection attempt seen, no reply" } },
|
||||||
{ "set": { "if": "ctx.connection?.state == 'S1'", "field": "connection.state_description", "value": "Connection established, not terminated" } },
|
{ "set": { "if": "ctx.connection?.state == 'S1'", "field": "connection.state_description", "value": "Connection established, not terminated" } },
|
||||||
|
|||||||
@@ -1,31 +1,227 @@
|
|||||||
{
|
{
|
||||||
"description" : "zeek.dns",
|
"description": "zeek.dns",
|
||||||
"processors" : [
|
"processors": [
|
||||||
{ "set": { "field": "event.dataset", "value": "dns" } },
|
{
|
||||||
{ "remove": { "field": ["host"], "ignore_failure": true } },
|
"set": {
|
||||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
"field": "event.dataset",
|
||||||
{ "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
|
"value": "dns"
|
||||||
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
|
}
|
||||||
{ "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } },
|
},
|
||||||
{ "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } },
|
{
|
||||||
{ "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } },
|
"remove": {
|
||||||
{ "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } },
|
"field": [
|
||||||
{ "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } },
|
"host"
|
||||||
{ "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } },
|
],
|
||||||
{ "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } },
|
"ignore_failure": true
|
||||||
{ "rename": { "field": "message2.rcode", "target_field": "dns.response.code", "ignore_missing": true } },
|
}
|
||||||
{ "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } },
|
},
|
||||||
{ "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } },
|
{
|
||||||
{ "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } },
|
"json": {
|
||||||
{ "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
|
"field": "message",
|
||||||
{ "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
|
"target_field": "message2",
|
||||||
{ "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
|
"ignore_failure": true
|
||||||
{ "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
|
}
|
||||||
{ "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
|
},
|
||||||
{ "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
|
{
|
||||||
{ "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
|
"dot_expander": {
|
||||||
{ "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } },
|
"field": "id.orig_h",
|
||||||
{ "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
|
"path": "message2",
|
||||||
{ "pipeline": { "name": "zeek.common" } }
|
"ignore_failure": true
|
||||||
]
|
}
|
||||||
}
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.proto",
|
||||||
|
"target_field": "network.transport",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.trans_id",
|
||||||
|
"target_field": "dns.id",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.rtt",
|
||||||
|
"target_field": "event.duration",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.query",
|
||||||
|
"target_field": "dns.query.name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.qclass",
|
||||||
|
"target_field": "dns.query.class",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.qclass_name",
|
||||||
|
"target_field": "dns.query.class_name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.qtype",
|
||||||
|
"target_field": "dns.query.type",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.qtype_name",
|
||||||
|
"target_field": "dns.query.type_name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.rcode",
|
||||||
|
"target_field": "dns.response.code",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.rcode_name",
|
||||||
|
"target_field": "dns.response.code_name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.AA",
|
||||||
|
"target_field": "dns.authoritative",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.TC",
|
||||||
|
"target_field": "dns.truncated",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.RD",
|
||||||
|
"target_field": "dns.recursion.desired",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.RA",
|
||||||
|
"target_field": "dns.recursion.available",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.Z",
|
||||||
|
"target_field": "dns.reserved",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.answers",
|
||||||
|
"target_field": "dns.answers.name",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"foreach": {
|
||||||
|
"field": "dns.answers.name",
|
||||||
|
"processor": {
|
||||||
|
"pipeline": {
|
||||||
|
"name": "common.ip_validation"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"foreach": {
|
||||||
|
"field": "temp._valid_ips",
|
||||||
|
"processor": {
|
||||||
|
"append": {
|
||||||
|
"field": "dns.resolved_ip",
|
||||||
|
"allow_duplicates": false,
|
||||||
|
"value": "{{{_ingest._value}}}",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"remove": {
|
||||||
|
"field": [
|
||||||
|
"temp"
|
||||||
|
],
|
||||||
|
"ignore_missing": true,
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.TTLs",
|
||||||
|
"target_field": "dns.ttls",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"rename": {
|
||||||
|
"field": "message2.rejected",
|
||||||
|
"target_field": "dns.query.rejected",
|
||||||
|
"ignore_missing": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"script": {
|
||||||
|
"lang": "painless",
|
||||||
|
"source": "ctx.dns.query.length = ctx.dns.query.name.length()",
|
||||||
|
"ignore_failure": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"set": {
|
||||||
|
"if": "ctx._index == 'so-zeek'",
|
||||||
|
"field": "_index",
|
||||||
|
"value": "so-zeek_dns",
|
||||||
|
"override": true
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')",
|
||||||
|
"name": "dns.tld"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"pipeline": {
|
||||||
|
"name": "zeek.common"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -1,20 +0,0 @@
|
|||||||
{
|
|
||||||
"description" : "zeek.dpd",
|
|
||||||
"processors" : [
|
|
||||||
{ "set": { "field": "event.dataset", "value": "dpd" } },
|
|
||||||
{ "remove": { "field": ["host"], "ignore_failure": true } },
|
|
||||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
|
||||||
{ "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
|
|
||||||
{ "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
|
|
||||||
{ "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
|
|
||||||
{ "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
|
|
||||||
{ "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
|
|
||||||
{ "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
|
|
||||||
{ "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
|
|
||||||
{ "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
|
|
||||||
{ "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
|
|
||||||
{ "rename": { "field": "message2.analyzer", "target_field": "observer.analyzer", "ignore_missing": true } },
|
|
||||||
{ "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } },
|
|
||||||
{ "pipeline": { "name": "zeek.common" } }
|
|
||||||
]
|
|
||||||
}
|
|
||||||
@@ -27,6 +27,7 @@
|
|||||||
{ "rename": { "field": "message2.resp_fuids", "target_field": "log.id.resp_fuids", "ignore_missing": true } },
|
{ "rename": { "field": "message2.resp_fuids", "target_field": "log.id.resp_fuids", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
|
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
|
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
|
||||||
|
{ "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } },
|
||||||
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
|
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
|
||||||
{ "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } },
|
{ "script": { "lang": "painless", "source": "ctx.useragent_length = ctx.useragent.length()", "ignore_failure": true } },
|
||||||
{ "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } },
|
{ "script": { "lang": "painless", "source": "ctx.virtual_host_length = ctx.virtual_host.length()", "ignore_failure": true } },
|
||||||
|
|||||||
@@ -27,6 +27,7 @@
|
|||||||
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
|
{ "rename": { "field": "message2.resp_filenames", "target_field": "file.resp_filenames", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
|
{ "rename": { "field": "message2.resp_mime_types", "target_field": "file.resp_mime_types", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.stream_id", "target_field": "http2.stream_id", "ignore_missing": true } },
|
{ "rename": { "field": "message2.stream_id", "target_field": "http2.stream_id", "ignore_missing": true } },
|
||||||
|
{ "rename": { "field": "message2.ja4h", "target_field": "hash.ja4h", "ignore_missing": true, "if": "ctx?.message2?.ja4h != null && ctx.message2.ja4h.length() > 0" } },
|
||||||
{ "remove": { "field": "message2.tags", "ignore_failure": true } },
|
{ "remove": { "field": "message2.tags", "ignore_failure": true } },
|
||||||
{ "remove": { "field": ["host"], "ignore_failure": true } },
|
{ "remove": { "field": ["host"], "ignore_failure": true } },
|
||||||
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
|
{ "script": { "lang": "painless", "source": "ctx.uri_length = ctx.uri.length()", "ignore_failure": true } },
|
||||||
|
|||||||
10
salt/elasticsearch/files/ingest/zeek.ja4ssh
Normal file
10
salt/elasticsearch/files/ingest/zeek.ja4ssh
Normal file
@@ -0,0 +1,10 @@
|
|||||||
|
{
|
||||||
|
"description": "zeek.ja4ssh",
|
||||||
|
"processors": [
|
||||||
|
{"set": {"field": "event.dataset","value": "ja4ssh"}},
|
||||||
|
{"remove": {"field": "host","ignore_missing": true,"ignore_failure": true}},
|
||||||
|
{"json": {"field": "message","target_field": "message2","ignore_failure": true}},
|
||||||
|
{"rename": {"field": "message2.ja4ssh", "target_field": "hash.ja4ssh", "ignore_missing": true, "if": "ctx?.message2?.ja4ssh != null && ctx.message2.ja4ssh.length() > 0" }},
|
||||||
|
{"pipeline": {"name": "zeek.common"}}
|
||||||
|
]
|
||||||
|
}
|
||||||
@@ -23,6 +23,8 @@
|
|||||||
{ "rename": { "field": "message2.validation_status","target_field": "ssl.validation_status", "ignore_missing": true } },
|
{ "rename": { "field": "message2.validation_status","target_field": "ssl.validation_status", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.ja3", "target_field": "hash.ja3", "ignore_missing": true } },
|
{ "rename": { "field": "message2.ja3", "target_field": "hash.ja3", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.ja3s", "target_field": "hash.ja3s", "ignore_missing": true } },
|
{ "rename": { "field": "message2.ja3s", "target_field": "hash.ja3s", "ignore_missing": true } },
|
||||||
|
{ "rename": { "field": "message2.ja4", "target_field": "hash.ja4", "ignore_missing": true, "if": "ctx?.message2?.ja4 != null && ctx.message2.ja4.length() > 0" } },
|
||||||
|
{ "rename": { "field": "message2.ja4s", "target_field": "hash.ja4s", "ignore_missing": true, "if": "ctx?.message2?.ja4s != null && ctx.message2.ja4s.length() > 0" } },
|
||||||
{ "foreach":
|
{ "foreach":
|
||||||
{
|
{
|
||||||
"if": "ctx?.tls?.client?.hash?.sha256 !=null",
|
"if": "ctx?.tls?.client?.hash?.sha256 !=null",
|
||||||
|
|||||||
@@ -42,6 +42,7 @@
|
|||||||
{ "dot_expander": { "field": "basic_constraints.path_length", "path": "message2", "ignore_failure": true } },
|
{ "dot_expander": { "field": "basic_constraints.path_length", "path": "message2", "ignore_failure": true } },
|
||||||
{ "rename": { "field": "message2.basic_constraints.path_length", "target_field": "x509.basic_constraints.path_length", "ignore_missing": true } },
|
{ "rename": { "field": "message2.basic_constraints.path_length", "target_field": "x509.basic_constraints.path_length", "ignore_missing": true } },
|
||||||
{ "rename": { "field": "message2.fingerprint", "target_field": "hash.sha256", "ignore_missing": true } },
|
{ "rename": { "field": "message2.fingerprint", "target_field": "hash.sha256", "ignore_missing": true } },
|
||||||
|
{ "rename": { "field": "message2.ja4x", "target_field": "hash.ja4x", "ignore_missing": true, "if": "ctx?.message2?.ja4x != null && ctx.message2.ja4x.length() > 0" } },
|
||||||
{ "pipeline": { "name": "zeek.common_ssl" } }
|
{ "pipeline": { "name": "zeek.common_ssl" } }
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,8 +20,28 @@ appender.rolling.strategy.type = DefaultRolloverStrategy
|
|||||||
appender.rolling.strategy.action.type = Delete
|
appender.rolling.strategy.action.type = Delete
|
||||||
appender.rolling.strategy.action.basepath = /var/log/elasticsearch
|
appender.rolling.strategy.action.basepath = /var/log/elasticsearch
|
||||||
appender.rolling.strategy.action.condition.type = IfFileName
|
appender.rolling.strategy.action.condition.type = IfFileName
|
||||||
appender.rolling.strategy.action.condition.glob = *.gz
|
appender.rolling.strategy.action.condition.glob = *.log.gz
|
||||||
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
|
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
|
||||||
appender.rolling.strategy.action.condition.nested_condition.age = 7D
|
appender.rolling.strategy.action.condition.nested_condition.age = 7D
|
||||||
|
|
||||||
|
appender.rolling_json.type = RollingFile
|
||||||
|
appender.rolling_json.name = rolling_json
|
||||||
|
appender.rolling_json.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.json
|
||||||
|
appender.rolling_json.layout.type = ECSJsonLayout
|
||||||
|
appender.rolling_json.layout.dataset = elasticsearch.server
|
||||||
|
appender.rolling_json.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.json.gz
|
||||||
|
appender.rolling_json.policies.type = Policies
|
||||||
|
appender.rolling_json.policies.time.type = TimeBasedTriggeringPolicy
|
||||||
|
appender.rolling_json.policies.time.interval = 1
|
||||||
|
appender.rolling_json.policies.time.modulate = true
|
||||||
|
appender.rolling_json.strategy.type = DefaultRolloverStrategy
|
||||||
|
appender.rolling_json.strategy.action.type = Delete
|
||||||
|
appender.rolling_json.strategy.action.basepath = /var/log/elasticsearch
|
||||||
|
appender.rolling_json.strategy.action.condition.type = IfFileName
|
||||||
|
appender.rolling_json.strategy.action.condition.glob = *.json.gz
|
||||||
|
appender.rolling_json.strategy.action.condition.nested_condition.type = IfLastModified
|
||||||
|
appender.rolling_json.strategy.action.condition.nested_condition.age = 1D
|
||||||
|
|
||||||
rootLogger.level = info
|
rootLogger.level = info
|
||||||
rootLogger.appenderRef.rolling.ref = rolling
|
rootLogger.appenderRef.rolling.ref = rolling
|
||||||
|
rootLogger.appenderRef.rolling_json.ref = rolling_json
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ elasticsearch:
|
|||||||
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
|
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
|
||||||
helpLink: elasticsearch.html
|
helpLink: elasticsearch.html
|
||||||
index_clean:
|
index_clean:
|
||||||
description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings.
|
description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings. This setting only applies to EVAL, STANDALONE, and HEAVY NODE installations. Other installations can only use ILM settings.
|
||||||
forcedType: bool
|
forcedType: bool
|
||||||
helpLink: elasticsearch.html
|
helpLink: elasticsearch.html
|
||||||
retention:
|
retention:
|
||||||
@@ -392,6 +392,7 @@ elasticsearch:
|
|||||||
so-logs-elastic_agent_x_metricbeat: *indexSettings
|
so-logs-elastic_agent_x_metricbeat: *indexSettings
|
||||||
so-logs-elastic_agent_x_osquerybeat: *indexSettings
|
so-logs-elastic_agent_x_osquerybeat: *indexSettings
|
||||||
so-logs-elastic_agent_x_packetbeat: *indexSettings
|
so-logs-elastic_agent_x_packetbeat: *indexSettings
|
||||||
|
so-logs-elasticsearch_x_server: *indexSettings
|
||||||
so-metrics-endpoint_x_metadata: *indexSettings
|
so-metrics-endpoint_x_metadata: *indexSettings
|
||||||
so-metrics-endpoint_x_metrics: *indexSettings
|
so-metrics-endpoint_x_metrics: *indexSettings
|
||||||
so-metrics-endpoint_x_policy: *indexSettings
|
so-metrics-endpoint_x_policy: *indexSettings
|
||||||
|
|||||||
@@ -15,7 +15,7 @@
|
|||||||
{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}
|
{% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}
|
||||||
|
|
||||||
{# start generation of integration default index_settings #}
|
{# start generation of integration default index_settings #}
|
||||||
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') %}
|
{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
|
||||||
{% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
|
{% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
|
||||||
{% if check_package_components.size > 1 %}
|
{% if check_package_components.size > 1 %}
|
||||||
{% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
|
{% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
|
||||||
|
|||||||
69
salt/elasticsearch/templates/component/ecs/hash.json
Normal file
69
salt/elasticsearch/templates/component/ecs/hash.json
Normal file
@@ -0,0 +1,69 @@
|
|||||||
|
{
|
||||||
|
"template": {
|
||||||
|
"mappings": {
|
||||||
|
"properties": {
|
||||||
|
"hash": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"ja3": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja3s": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"hassh": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"md5": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"sha1": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"sha256": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja4": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja4l": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja4ls": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja4t": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja4ts": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja4ssh": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja4h": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
},
|
||||||
|
"ja4x": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -841,6 +841,10 @@
|
|||||||
"type": "long"
|
"type": "long"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
},
|
||||||
|
"capture_file": {
|
||||||
|
"type": "keyword",
|
||||||
|
"ignore_above": 1024
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -0,0 +1,43 @@
|
|||||||
|
{
|
||||||
|
"template": {
|
||||||
|
"mappings": {
|
||||||
|
"properties": {
|
||||||
|
"agent": {
|
||||||
|
"type": "object",
|
||||||
|
"properties": {
|
||||||
|
"hostname": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"id": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"last_checkin_status": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"last_checkin": {
|
||||||
|
"type": "date"
|
||||||
|
},
|
||||||
|
"name": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"offline_duration_hours": {
|
||||||
|
"type": "integer"
|
||||||
|
},
|
||||||
|
"policy_id": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"status": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,104 @@
|
|||||||
|
{
|
||||||
|
"template": {
|
||||||
|
"mappings": {
|
||||||
|
"properties": {
|
||||||
|
"@timestamp": {
|
||||||
|
"type": "date"
|
||||||
|
},
|
||||||
|
"so_kind": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"so_operation": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"so_chat": {
|
||||||
|
"properties": {
|
||||||
|
"role": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"content": {
|
||||||
|
"type": "object",
|
||||||
|
"enabled": false
|
||||||
|
},
|
||||||
|
"sessionId": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"createTime": {
|
||||||
|
"type": "date"
|
||||||
|
},
|
||||||
|
"deletedAt": {
|
||||||
|
"type": "date"
|
||||||
|
},
|
||||||
|
"tags": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"tool_use_id": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"userId": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"message": {
|
||||||
|
"properties": {
|
||||||
|
"id": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"type": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"role": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"model": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"contentStr": {
|
||||||
|
"type": "text"
|
||||||
|
},
|
||||||
|
"contentBlocks": {
|
||||||
|
"type": "nested",
|
||||||
|
"enabled": false
|
||||||
|
},
|
||||||
|
"stopReason": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"stopSequence": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"usage": {
|
||||||
|
"properties": {
|
||||||
|
"input_tokens": {
|
||||||
|
"type": "long"
|
||||||
|
},
|
||||||
|
"output_tokens": {
|
||||||
|
"type": "long"
|
||||||
|
},
|
||||||
|
"credits": {
|
||||||
|
"type": "long"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"_meta": {
|
||||||
|
"ecs_version": "1.12.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"template": {},
|
||||||
|
"version": 1,
|
||||||
|
"_meta": {
|
||||||
|
"description": "default settings for common Security Onion Assistant indices"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,44 @@
|
|||||||
|
{
|
||||||
|
"template": {
|
||||||
|
"mappings": {
|
||||||
|
"properties": {
|
||||||
|
"@timestamp": {
|
||||||
|
"type": "date"
|
||||||
|
},
|
||||||
|
"so_kind": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"so_session": {
|
||||||
|
"properties": {
|
||||||
|
"title": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"sessionId": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"createTime": {
|
||||||
|
"type": "date"
|
||||||
|
},
|
||||||
|
"deleteTime": {
|
||||||
|
"type": "date"
|
||||||
|
},
|
||||||
|
"tags": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
},
|
||||||
|
"userId": {
|
||||||
|
"ignore_above": 1024,
|
||||||
|
"type": "keyword"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"_meta": {
|
||||||
|
"ecs_version": "1.12.2"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -0,0 +1,7 @@
|
|||||||
|
{
|
||||||
|
"template": {},
|
||||||
|
"version": 1,
|
||||||
|
"_meta": {
|
||||||
|
"description": "default settings for common Security Onion Assistant indices"
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
/bin/bash
|
#!/bin/bash
|
||||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
@@ -6,6 +6,6 @@
|
|||||||
|
|
||||||
. /usr/sbin/so-common
|
. /usr/sbin/so-common
|
||||||
|
|
||||||
|
|
||||||
echo "Starting ILM..."
|
echo "Starting ILM..."
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://localhost:9200/_ilm/start
|
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://localhost:9200/_ilm/start
|
||||||
|
echo
|
||||||
|
|||||||
@@ -8,3 +8,4 @@
|
|||||||
|
|
||||||
echo "Stopping ILM..."
|
echo "Stopping ILM..."
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://localhost:9200/_ilm/stop
|
curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L -X POST https://localhost:9200/_ilm/stop
|
||||||
|
echo
|
||||||
|
|||||||
113
salt/elasticsearch/tools/sbin/so-elasticsearch-indices-growth
Normal file
113
salt/elasticsearch/tools/sbin/so-elasticsearch-indices-growth
Normal file
@@ -0,0 +1,113 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
INFLUX_URL="https://localhost:8086/api/v2"
|
||||||
|
|
||||||
|
. /usr/sbin/so-common
|
||||||
|
|
||||||
|
request() {
|
||||||
|
curl -skK /opt/so/conf/influxdb/curl.config "$INFLUX_URL/$@"
|
||||||
|
}
|
||||||
|
|
||||||
|
lookup_org_id() {
|
||||||
|
response=$(request orgs?org=Security+Onion)
|
||||||
|
echo "$response" | jq -r ".orgs[] | select(.name == \"Security Onion\").id"
|
||||||
|
}
|
||||||
|
|
||||||
|
ORG_ID=$(lookup_org_id)
|
||||||
|
|
||||||
|
run_flux_query() {
|
||||||
|
local query=$1
|
||||||
|
request "query?org=$ORG_ID" -H 'Accept:application/csv' -H 'Content-type:application/vnd.flux' -d "$query" -XPOST 2>/dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
read_csv_result() {
|
||||||
|
local result="$1"
|
||||||
|
echo "$result" | grep '^,_result,' | head -1 | awk -F',' '{print $NF}' | tr -d '\r\n\t '
|
||||||
|
}
|
||||||
|
|
||||||
|
bytes_to_gb() {
|
||||||
|
local bytes="${1:-0}"
|
||||||
|
if [[ "$bytes" =~ ^-?[0-9]+$ ]]; then
|
||||||
|
echo "$bytes" | awk '{printf "%.2f", $1 / 1024 / 1024 / 1024}'
|
||||||
|
else
|
||||||
|
echo "0.00"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
indexes_query='from(bucket: "telegraf/so_long_term")
|
||||||
|
|> range(start: -7d)
|
||||||
|
|> filter(fn: (r) => r._measurement == "elasticsearch_index_size")
|
||||||
|
|> distinct(column: "_field")
|
||||||
|
|> keep(columns: ["_field"])'
|
||||||
|
|
||||||
|
indexes_result=$(run_flux_query "$indexes_query")
|
||||||
|
indexes=$(echo "$indexes_result" | tail -n +2 | cut -d',' -f4 | grep -v '^$' | grep -v '^_field$' | sed 's/\r$//' | sort -u)
|
||||||
|
|
||||||
|
printf "%-50s %15s %15s %15s\n" "Index Name" "Last 24hr (GB)" "Last 7d (GB)" "Last 30d (GB)"
|
||||||
|
printf "%-50s %15s %15s %15s\n" "$(printf '%.0s-' {1..50})" "$(printf '%.0s-' {1..15})" "$(printf '%.0s-' {1..15})" "$(printf '%.0s-' {1..15})"
|
||||||
|
|
||||||
|
for index in $indexes; do
|
||||||
|
[[ -z "$index" ]] && continue
|
||||||
|
current_query="from(bucket: \"telegraf/so_long_term\")
|
||||||
|
|> range(start: -4h)
|
||||||
|
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|
||||||
|
|> last()
|
||||||
|
|> keep(columns: [\"_value\"])"
|
||||||
|
current_result=$(run_flux_query "$current_query")
|
||||||
|
current_size=$(read_csv_result "$current_result")
|
||||||
|
current_size=${current_size:-0}
|
||||||
|
|
||||||
|
size_24h_query="from(bucket: \"telegraf/so_long_term\")
|
||||||
|
|> range(start: -25h, stop: -23h)
|
||||||
|
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|
||||||
|
|> last()
|
||||||
|
|> keep(columns: [\"_value\"])"
|
||||||
|
size_24h_result=$(run_flux_query "$size_24h_query")
|
||||||
|
size_24h_ago=$(read_csv_result "$size_24h_result")
|
||||||
|
size_24h_ago=${size_24h_ago:-$current_size}
|
||||||
|
|
||||||
|
size_7d_query="from(bucket: \"telegraf/so_long_term\")
|
||||||
|
|> range(start: -7d8h, stop: -7d)
|
||||||
|
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|
||||||
|
|> last()
|
||||||
|
|> keep(columns: [\"_value\"])"
|
||||||
|
size_7d_result=$(run_flux_query "$size_7d_query")
|
||||||
|
size_7d_ago=$(read_csv_result "$size_7d_result")
|
||||||
|
size_7d_ago=${size_7d_ago:-$current_size}
|
||||||
|
|
||||||
|
size_30d_query="from(bucket: \"telegraf/so_long_term\")
|
||||||
|
|> range(start: -30d8h, stop: -30d)
|
||||||
|
|> filter(fn: (r) => r._measurement == \"elasticsearch_index_size\" and r._field == \"$index\")
|
||||||
|
|> last()
|
||||||
|
|> keep(columns: [\"_value\"])"
|
||||||
|
size_30d_result=$(run_flux_query "$size_30d_query")
|
||||||
|
size_30d_ago=$(read_csv_result "$size_30d_result")
|
||||||
|
size_30d_ago=${size_30d_ago:-$current_size}
|
||||||
|
|
||||||
|
# if an index was recently cleaned up by ilm it will result in a negative number for 'index growth'.
|
||||||
|
growth_24h=$(( current_size > size_24h_ago ? current_size - size_24h_ago : 0 ))
|
||||||
|
|
||||||
|
growth_7d=$(( current_size > size_7d_ago ? current_size - size_7d_ago : 0 ))
|
||||||
|
|
||||||
|
growth_30d=$(( current_size > size_30d_ago ? current_size - size_30d_ago : 0 ))
|
||||||
|
|
||||||
|
growth_24h_gb=$(bytes_to_gb "$growth_24h")
|
||||||
|
growth_7d_gb=$(bytes_to_gb "$growth_7d")
|
||||||
|
growth_30d_gb=$(bytes_to_gb "$growth_30d")
|
||||||
|
|
||||||
|
# Only results for indices with atleast 1 metric above 0.00
|
||||||
|
if [[ "$growth_24h_gb" != "0.00" ]] || [[ "$growth_7d_gb" != "0.00" ]] || [[ "$growth_30d_gb" != "0.00" ]]; then
|
||||||
|
printf "%020.2f|%-50s %15s %15s %15s\n" \
|
||||||
|
"$growth_24h" \
|
||||||
|
"$index" \
|
||||||
|
"$growth_24h_gb" \
|
||||||
|
"$growth_7d_gb" \
|
||||||
|
"$growth_30d_gb"
|
||||||
|
fi
|
||||||
|
done | sort -t'|' -k1,1nr | cut -d'|' -f2-
|
||||||
|
|
||||||
1164
salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate
Executable file
1164
salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate
Executable file
File diff suppressed because it is too large
Load Diff
@@ -21,7 +21,7 @@ while [[ "$COUNT" -le 240 ]]; do
|
|||||||
ELASTICSEARCH_CONNECTED="yes"
|
ELASTICSEARCH_CONNECTED="yes"
|
||||||
echo "connected!"
|
echo "connected!"
|
||||||
# Check cluster health once connected
|
# Check cluster health once connected
|
||||||
so-elasticsearch-query _cluster/health?wait_for_status=yellow > /dev/null 2>&1
|
so-elasticsearch-query _cluster/health?wait_for_status=yellow\&timeout=120s > /dev/null 2>&1
|
||||||
break
|
break
|
||||||
else
|
else
|
||||||
((COUNT+=1))
|
((COUNT+=1))
|
||||||
|
|||||||
195
salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot
Normal file
195
salt/elasticsearch/tools/sbin/so-elasticsearch-troubleshoot
Normal file
@@ -0,0 +1,195 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
|
||||||
|
. /usr/sbin/so-common
|
||||||
|
|
||||||
|
RED='\033[0;31m'
|
||||||
|
GREEN='\033[0;32m'
|
||||||
|
YELLOW='\033[1;33m'
|
||||||
|
BLUE='\033[0;34m'
|
||||||
|
BOLD='\033[1;37m'
|
||||||
|
NC='\033[0m'
|
||||||
|
|
||||||
|
log_title() {
|
||||||
|
if [ $1 == "LOG" ]; then
|
||||||
|
echo -e "\n${BOLD}================ $2 ================${NC}\n"
|
||||||
|
elif [ $1 == "OK" ]; then
|
||||||
|
echo -e "${GREEN} $2 ${NC}"
|
||||||
|
elif [ $1 == "WARN" ]; then
|
||||||
|
echo -e "${YELLOW} $2 ${NC}"
|
||||||
|
elif [ $1 == "ERROR" ]; then
|
||||||
|
echo -e "${RED} $2 ${NC}"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
health_report() {
|
||||||
|
if ! health_report_output=$(so-elasticsearch-query _health_report?format=json --fail 2>/dev/null); then
|
||||||
|
log_title "ERROR" "Failed to retrieve health report from Elasticsearch"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
non_green_count=$(echo "$health_report_output" | jq '[.indicators | to_entries[] | select(.value.status != "green")] | length')
|
||||||
|
|
||||||
|
if [ "$non_green_count" -gt 0 ]; then
|
||||||
|
echo "$health_report_output" | jq -r '.indicators | to_entries[] | select(.value.status != "green") | .key' | while read -r indicator_name; do
|
||||||
|
indicator=$(echo "$health_report_output" | jq -r ".indicators.\"$indicator_name\"")
|
||||||
|
status=$(echo "$indicator" | jq -r '.status')
|
||||||
|
symptom=$(echo "$indicator" | jq -r '.symptom // "No symptom available"')
|
||||||
|
|
||||||
|
# reormat indicator name
|
||||||
|
display_name=$(echo "$indicator_name" | tr '_' ' ' | sed 's/\b\(.\)/\u\1/g')
|
||||||
|
|
||||||
|
if [ "$status" = "yellow" ]; then
|
||||||
|
log_title "WARN" "$display_name: $symptom"
|
||||||
|
else
|
||||||
|
log_title "ERROR" "$display_name: $symptom"
|
||||||
|
fi
|
||||||
|
|
||||||
|
# diagnosis if available
|
||||||
|
echo "$indicator" | jq -c '.diagnosis[]? // empty' | while read -r diagnosis; do
|
||||||
|
cause=$(echo "$diagnosis" | jq -r '.cause // "Unknown"')
|
||||||
|
action=$(echo "$diagnosis" | jq -r '.action // "No action specified"')
|
||||||
|
|
||||||
|
echo -e " ${BOLD}Cause:${NC} $cause\n"
|
||||||
|
echo -e " ${BOLD}Action:${NC} $action\n"
|
||||||
|
|
||||||
|
# Check for affected indices
|
||||||
|
affected_indices=$(echo "$diagnosis" | jq -r '.affected_resources.indices[]? // empty')
|
||||||
|
if [ -n "$affected_indices" ]; then
|
||||||
|
echo -e " ${BOLD}Affected indices:${NC}"
|
||||||
|
total_indices=$(echo "$affected_indices" | wc -l)
|
||||||
|
echo "$affected_indices" | head -10 | while read -r index; do
|
||||||
|
echo " - $index"
|
||||||
|
done
|
||||||
|
if [ "$total_indices" -gt 10 ]; then
|
||||||
|
remaining=$((total_indices - 10))
|
||||||
|
echo " ... and $remaining more indices (truncated for readability)"
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
echo
|
||||||
|
done
|
||||||
|
done
|
||||||
|
else
|
||||||
|
log_title "OK" "All health indicators are green"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
elasticsearch_status() {
|
||||||
|
log_title "LOG" "Elasticsearch Status"
|
||||||
|
if so-elasticsearch-query / --fail --output /dev/null; then
|
||||||
|
health_report
|
||||||
|
else
|
||||||
|
log_title "ERROR" "Elasticsearch API is not accessible"
|
||||||
|
so-status
|
||||||
|
log_title "ERROR" "Make sure Elasticsearch is running. Addtionally, check for startup errors in /opt/so/log/elasticsearch/securityonion.log${NC}\n"
|
||||||
|
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
indices_by_age() {
|
||||||
|
log_title "LOG" "Indices by Creation Date - Size > 1KB"
|
||||||
|
log_title "WARN" "Since high/flood watermark has been reached consider updating ILM policies.\n"
|
||||||
|
if ! indices_output=$(so-elasticsearch-query '_cat/indices?v&s=creation.date:asc&h=creation.date.string,index,status,health,docs.count,pri.store.size&bytes=b&format=json' --fail 2>/dev/null); then
|
||||||
|
log_title "ERROR" "Failed to retrieve indices list from Elasticsearch"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Filter for indices with size > 1KB (1024 bytes) and format output
|
||||||
|
echo -e "${BOLD}Creation Date Name Size${NC}"
|
||||||
|
echo -e "${BOLD}--------------------------------------------------------------------------------------------------------------${NC}"
|
||||||
|
|
||||||
|
# Create list of indices excluding .internal, so-detection*, so-case*
|
||||||
|
echo "$indices_output" | jq -r '.[] | select((."pri.store.size" | tonumber) > 1024) | select(.index | (startswith(".internal") or startswith("so-detection") or startswith("so-case")) | not ) | "\(."creation.date.string") | \(.index) | \(."pri.store.size")"' | while IFS='|' read -r creation_date index_name size_bytes; do
|
||||||
|
# Convert bytes to GB / MB
|
||||||
|
if [ "$size_bytes" -gt 1073741824 ]; then
|
||||||
|
size_human=$(echo "scale=2; $size_bytes / 1073741824" | bc)GB
|
||||||
|
else
|
||||||
|
size_human=$(echo "scale=2; $size_bytes / 1048576" | bc)MB
|
||||||
|
fi
|
||||||
|
|
||||||
|
creation_date=$(date -d "$creation_date" '+%Y-%m-%dT%H:%MZ' )
|
||||||
|
|
||||||
|
# Format output with spacing
|
||||||
|
printf "%-19s %-76s %10s\n" "$creation_date" "$index_name" "$size_human"
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
watermark_settings() {
|
||||||
|
watermark_path=".defaults.cluster.routing.allocation.disk.watermark"
|
||||||
|
if ! watermark_output=$(so-elasticsearch-query _cluster/settings?include_defaults=true\&filter_path=*.cluster.routing.allocation.disk.* --fail 2>/dev/null); then
|
||||||
|
log_title "ERROR" "Failed to retrieve watermark settings from Elasticsearch"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! disk_allocation_output=$(so-elasticsearch-query _cat/nodes?v\&h=name,ip,disk.used_percent,disk.avail,disk.total,node.role\&format=json --fail 2>/dev/null); then
|
||||||
|
log_title "ERROR" "Failed to retrieve disk allocation data from Elasticsearch"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
flood=$(echo $watermark_output | jq -r "$watermark_path.flood_stage" )
|
||||||
|
high=$(echo $watermark_output | jq -r "$watermark_path.high" )
|
||||||
|
low=$(echo $watermark_output | jq -r "$watermark_path.low" )
|
||||||
|
|
||||||
|
# Strip percentage signs for comparison
|
||||||
|
flood_num=${flood%\%}
|
||||||
|
high_num=${high%\%}
|
||||||
|
low_num=${low%\%}
|
||||||
|
|
||||||
|
# Check each nodes disk usage
|
||||||
|
log_title "LOG" "Disk Usage Check"
|
||||||
|
echo -e "${BOLD}LOW:${GREEN}$low${NC}${BOLD} HIGH:${YELLOW}${high}${NC}${BOLD} FLOOD:${RED}${flood}${NC}\n"
|
||||||
|
|
||||||
|
# Only show data nodes (d=data, h=hot, w=warm, c=cold, f=frozen, s=content)
|
||||||
|
echo "$disk_allocation_output" | jq -r '.[] | select(.["node.role"] | test("[dhwcfs]")) | "\(.name)|\(.["disk.used_percent"])"' | while IFS='|' read -r node_name disk_used; do
|
||||||
|
disk_used_num=$(echo $disk_used | bc)
|
||||||
|
|
||||||
|
if (( $(echo "$disk_used_num >= $flood_num" | bc -l) )); then
|
||||||
|
log_title "ERROR" "$node_name is at or above the flood watermark ($flood)! Disk usage: ${disk_used}%"
|
||||||
|
touch /tmp/watermark_reached
|
||||||
|
elif (( $(echo "$disk_used_num >= $high_num" | bc -l) )); then
|
||||||
|
log_title "ERROR" "$node_name is at or above the high watermark ($high)! Disk usage: ${disk_used}%"
|
||||||
|
touch /tmp/watermark_reached
|
||||||
|
else
|
||||||
|
log_title "OK" "$node_name disk usage: ${disk_used}%"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
|
||||||
|
# Check if we need to show indices by age
|
||||||
|
if [ -f /tmp/watermark_reached ]; then
|
||||||
|
indices_by_age
|
||||||
|
rm -f /tmp/watermark_reached
|
||||||
|
fi
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
unassigned_shards() {
|
||||||
|
|
||||||
|
if ! unassigned_shards_output=$(so-elasticsearch-query _cat/shards?v\&h=index,shard,prirep,state,unassigned.reason,unassigned.details\&s=state\&format=json --fail 2>/dev/null); then
|
||||||
|
log_title "ERROR" "Failed to retrieve shard data from Elasticsearch"
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
log_title "LOG" "Unassigned Shards Check"
|
||||||
|
# Check if there are any UNASSIGNED shards
|
||||||
|
unassigned_count=$(echo "$unassigned_shards_output" | jq '[.[] | select(.state == "UNASSIGNED")] | length')
|
||||||
|
|
||||||
|
if [ "$unassigned_count" -gt 0 ]; then
|
||||||
|
echo "$unassigned_shards_output" | jq -r '.[] | select(.state == "UNASSIGNED") | "\(.index)|\(.shard)|\(.prirep)|\(."unassigned.reason")"' | while IFS='|' read -r index shard prirep reason; do
|
||||||
|
if [ "$prirep" = "r" ]; then
|
||||||
|
log_title "WARN" "Replica shard for index $index is unassigned. Reason: $reason"
|
||||||
|
elif [ "$prirep" = "p" ]; then
|
||||||
|
log_title "ERROR" "Primary shard for index $index is unassigned. Reason: $reason"
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
else
|
||||||
|
log_title "OK" "All shards are assigned"
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
main() {
|
||||||
|
elasticsearch_status
|
||||||
|
watermark_settings
|
||||||
|
unassigned_shards
|
||||||
|
}
|
||||||
|
|
||||||
|
main
|
||||||
@@ -136,7 +136,7 @@ if [ ! -f $STATE_FILE_SUCCESS ]; then
|
|||||||
TEMPLATE=${i::-14}
|
TEMPLATE=${i::-14}
|
||||||
COMPONENT_PATTERN=${TEMPLATE:3}
|
COMPONENT_PATTERN=${TEMPLATE:3}
|
||||||
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
|
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
|
||||||
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ logs-http_endpoint\.generic|logs-winlog\.winlog ]]; then
|
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then
|
||||||
load_failures=$((load_failures+1))
|
load_failures=$((load_failures+1))
|
||||||
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
|
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
|
||||||
else
|
else
|
||||||
|
|||||||
@@ -21,7 +21,7 @@
|
|||||||
'so-strelka-filestream'
|
'so-strelka-filestream'
|
||||||
] %}
|
] %}
|
||||||
|
|
||||||
{% elif GLOBALS.role == 'so-manager' or GLOBALS.role == 'so-standalone' or GLOBALS.role == 'so-managersearch' %}
|
{% elif GLOBALS.role in ['so-manager', 'so-standalone','so-managersearch', 'so-managerhype'] %}
|
||||||
{% set NODE_CONTAINERS = [
|
{% set NODE_CONTAINERS = [
|
||||||
'so-dockerregistry',
|
'so-dockerregistry',
|
||||||
'so-elasticsearch',
|
'so-elasticsearch',
|
||||||
|
|||||||
@@ -14,11 +14,13 @@ firewall:
|
|||||||
external_kafka: []
|
external_kafka: []
|
||||||
fleet: []
|
fleet: []
|
||||||
heavynode: []
|
heavynode: []
|
||||||
|
hypervisor: []
|
||||||
idh: []
|
idh: []
|
||||||
import: []
|
import: []
|
||||||
localhost:
|
localhost:
|
||||||
- 127.0.0.1
|
- 127.0.0.1
|
||||||
manager: []
|
manager: []
|
||||||
|
managerhype: []
|
||||||
managersearch: []
|
managersearch: []
|
||||||
receiver: []
|
receiver: []
|
||||||
searchnode: []
|
searchnode: []
|
||||||
@@ -489,6 +491,15 @@ firewall:
|
|||||||
- elastic_agent_control
|
- elastic_agent_control
|
||||||
- elastic_agent_data
|
- elastic_agent_data
|
||||||
- elastic_agent_update
|
- elastic_agent_update
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
customhostgroup0:
|
customhostgroup0:
|
||||||
portgroups: []
|
portgroups: []
|
||||||
customhostgroup1:
|
customhostgroup1:
|
||||||
@@ -541,6 +552,218 @@ firewall:
|
|||||||
desktop:
|
desktop:
|
||||||
portgroups:
|
portgroups:
|
||||||
- salt_manager
|
- salt_manager
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
self:
|
||||||
|
portgroups:
|
||||||
|
- syslog
|
||||||
|
syslog:
|
||||||
|
portgroups:
|
||||||
|
- syslog
|
||||||
|
customhostgroup0:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup1:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup2:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup3:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup4:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup5:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup6:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup7:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup8:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup9:
|
||||||
|
portgroups: []
|
||||||
|
managerhype:
|
||||||
|
chain:
|
||||||
|
DOCKER-USER:
|
||||||
|
hostgroups:
|
||||||
|
managerhype:
|
||||||
|
portgroups:
|
||||||
|
- kibana
|
||||||
|
- redis
|
||||||
|
- influxdb
|
||||||
|
- elasticsearch_rest
|
||||||
|
- elasticsearch_node
|
||||||
|
- docker_registry
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- localrules
|
||||||
|
- sensoroni
|
||||||
|
fleet:
|
||||||
|
portgroups:
|
||||||
|
- elasticsearch_rest
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- sensoroni
|
||||||
|
- yum
|
||||||
|
- beats_5044
|
||||||
|
- beats_5644
|
||||||
|
- beats_5056
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
idh:
|
||||||
|
portgroups:
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- sensoroni
|
||||||
|
- yum
|
||||||
|
- beats_5044
|
||||||
|
- beats_5644
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
sensor:
|
||||||
|
portgroups:
|
||||||
|
- beats_5044
|
||||||
|
- beats_5644
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- sensoroni
|
||||||
|
searchnode:
|
||||||
|
portgroups:
|
||||||
|
- redis
|
||||||
|
- elasticsearch_rest
|
||||||
|
- elasticsearch_node
|
||||||
|
- beats_5644
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
|
heavynode:
|
||||||
|
portgroups:
|
||||||
|
- redis
|
||||||
|
- elasticsearch_rest
|
||||||
|
- elasticsearch_node
|
||||||
|
- beats_5644
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
|
receiver:
|
||||||
|
portgroups:
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
|
analyst:
|
||||||
|
portgroups:
|
||||||
|
- nginx
|
||||||
|
beats_endpoint:
|
||||||
|
portgroups:
|
||||||
|
- beats_5044
|
||||||
|
beats_endpoint_ssl:
|
||||||
|
portgroups:
|
||||||
|
- beats_5644
|
||||||
|
elasticsearch_rest:
|
||||||
|
portgroups:
|
||||||
|
- elasticsearch_rest
|
||||||
|
elastic_agent_endpoint:
|
||||||
|
portgroups:
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
endgame:
|
||||||
|
portgroups:
|
||||||
|
- endgame
|
||||||
|
external_suricata:
|
||||||
|
portgroups:
|
||||||
|
- external_suricata
|
||||||
|
desktop:
|
||||||
|
portgroups:
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- sensoroni
|
||||||
|
- yum
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
|
customhostgroup0:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup1:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup2:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup3:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup4:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup5:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup6:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup7:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup8:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup9:
|
||||||
|
portgroups: []
|
||||||
|
INPUT:
|
||||||
|
hostgroups:
|
||||||
|
anywhere:
|
||||||
|
portgroups:
|
||||||
|
- ssh
|
||||||
|
dockernet:
|
||||||
|
portgroups:
|
||||||
|
- all
|
||||||
|
fleet:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
idh:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
localhost:
|
||||||
|
portgroups:
|
||||||
|
- all
|
||||||
|
sensor:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
searchnode:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
heavynode:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
receiver:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
desktop:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
self:
|
self:
|
||||||
portgroups:
|
portgroups:
|
||||||
- syslog
|
- syslog
|
||||||
@@ -686,6 +909,15 @@ firewall:
|
|||||||
- elastic_agent_control
|
- elastic_agent_control
|
||||||
- elastic_agent_data
|
- elastic_agent_data
|
||||||
- elastic_agent_update
|
- elastic_agent_update
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
customhostgroup0:
|
customhostgroup0:
|
||||||
portgroups: []
|
portgroups: []
|
||||||
customhostgroup1:
|
customhostgroup1:
|
||||||
@@ -738,6 +970,9 @@ firewall:
|
|||||||
desktop:
|
desktop:
|
||||||
portgroups:
|
portgroups:
|
||||||
- salt_manager
|
- salt_manager
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
self:
|
self:
|
||||||
portgroups:
|
portgroups:
|
||||||
- syslog
|
- syslog
|
||||||
@@ -890,6 +1125,15 @@ firewall:
|
|||||||
- elastic_agent_control
|
- elastic_agent_control
|
||||||
- elastic_agent_data
|
- elastic_agent_data
|
||||||
- elastic_agent_update
|
- elastic_agent_update
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- yum
|
||||||
|
- docker_registry
|
||||||
|
- influxdb
|
||||||
|
- elastic_agent_control
|
||||||
|
- elastic_agent_data
|
||||||
|
- elastic_agent_update
|
||||||
|
- sensoroni
|
||||||
customhostgroup0:
|
customhostgroup0:
|
||||||
portgroups: []
|
portgroups: []
|
||||||
customhostgroup1:
|
customhostgroup1:
|
||||||
@@ -945,6 +1189,9 @@ firewall:
|
|||||||
desktop:
|
desktop:
|
||||||
portgroups:
|
portgroups:
|
||||||
- salt_manager
|
- salt_manager
|
||||||
|
hypervisor:
|
||||||
|
portgroups:
|
||||||
|
- salt_manager
|
||||||
self:
|
self:
|
||||||
portgroups:
|
portgroups:
|
||||||
- syslog
|
- syslog
|
||||||
@@ -983,6 +1230,10 @@ firewall:
|
|||||||
portgroups:
|
portgroups:
|
||||||
- elasticsearch_node
|
- elasticsearch_node
|
||||||
- elasticsearch_rest
|
- elasticsearch_rest
|
||||||
|
managerhype:
|
||||||
|
portgroups:
|
||||||
|
- elasticsearch_node
|
||||||
|
- elasticsearch_rest
|
||||||
standalone:
|
standalone:
|
||||||
portgroups:
|
portgroups:
|
||||||
- elasticsearch_node
|
- elasticsearch_node
|
||||||
@@ -1130,6 +1381,10 @@ firewall:
|
|||||||
portgroups:
|
portgroups:
|
||||||
- elasticsearch_node
|
- elasticsearch_node
|
||||||
- elasticsearch_rest
|
- elasticsearch_rest
|
||||||
|
managerhype:
|
||||||
|
portgroups:
|
||||||
|
- elasticsearch_node
|
||||||
|
- elasticsearch_rest
|
||||||
standalone:
|
standalone:
|
||||||
portgroups:
|
portgroups:
|
||||||
- elasticsearch_node
|
- elasticsearch_node
|
||||||
@@ -1332,6 +1587,9 @@ firewall:
|
|||||||
portgroups:
|
portgroups:
|
||||||
- redis
|
- redis
|
||||||
- elastic_agent_data
|
- elastic_agent_data
|
||||||
|
managerhype:
|
||||||
|
portgroups:
|
||||||
|
- elastic_agent_data
|
||||||
self:
|
self:
|
||||||
portgroups:
|
portgroups:
|
||||||
- redis
|
- redis
|
||||||
@@ -1449,6 +1707,9 @@ firewall:
|
|||||||
managersearch:
|
managersearch:
|
||||||
portgroups:
|
portgroups:
|
||||||
- openssh
|
- openssh
|
||||||
|
managerhype:
|
||||||
|
portgroups:
|
||||||
|
- openssh
|
||||||
standalone:
|
standalone:
|
||||||
portgroups:
|
portgroups:
|
||||||
- openssh
|
- openssh
|
||||||
@@ -1472,3 +1733,66 @@ firewall:
|
|||||||
portgroups: []
|
portgroups: []
|
||||||
customhostgroup9:
|
customhostgroup9:
|
||||||
portgroups: []
|
portgroups: []
|
||||||
|
hypervisor:
|
||||||
|
chain:
|
||||||
|
DOCKER-USER:
|
||||||
|
hostgroups:
|
||||||
|
customhostgroup0:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup1:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup2:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup3:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup4:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup5:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup6:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup7:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup8:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup9:
|
||||||
|
portgroups: []
|
||||||
|
INPUT:
|
||||||
|
hostgroups:
|
||||||
|
anywhere:
|
||||||
|
portgroups:
|
||||||
|
- ssh
|
||||||
|
dockernet:
|
||||||
|
portgroups:
|
||||||
|
- all
|
||||||
|
localhost:
|
||||||
|
portgroups:
|
||||||
|
- all
|
||||||
|
manager:
|
||||||
|
portgroups: []
|
||||||
|
managersearch:
|
||||||
|
portgroups: []
|
||||||
|
managerhype:
|
||||||
|
portgroups: []
|
||||||
|
standalone:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup0:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup1:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup2:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup3:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup4:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup5:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup6:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup7:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup8:
|
||||||
|
portgroups: []
|
||||||
|
customhostgroup9:
|
||||||
|
portgroups: []
|
||||||
|
|||||||
@@ -91,6 +91,10 @@ COMMIT
|
|||||||
-A INPUT -m conntrack --ctstate INVALID -j DROP
|
-A INPUT -m conntrack --ctstate INVALID -j DROP
|
||||||
-A INPUT -p icmp -j ACCEPT
|
-A INPUT -p icmp -j ACCEPT
|
||||||
-A INPUT -j LOGGING
|
-A INPUT -j LOGGING
|
||||||
|
{% if GLOBALS.role in ['so-hypervisor', 'so-managerhype'] -%}
|
||||||
|
-A FORWARD -m state --state RELATED,ESTABLISHED -j ACCEPT
|
||||||
|
-A FORWARD -i br0 -o br0 -j ACCEPT
|
||||||
|
{%- endif %}
|
||||||
-A FORWARD -j DOCKER-USER
|
-A FORWARD -j DOCKER-USER
|
||||||
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
|
-A FORWARD -j DOCKER-ISOLATION-STAGE-1
|
||||||
-A FORWARD -o sobridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
-A FORWARD -o sobridge -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
|
||||||
|
|||||||
@@ -25,7 +25,7 @@
|
|||||||
{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
|
{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
|
||||||
{% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}
|
{% set kafka_node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}
|
||||||
|
|
||||||
{% if role in ['manager', 'managersearch', 'standalone'] %}
|
{% if role.startswith('manager') or role == 'standalone' %}
|
||||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
|
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
|
||||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
|
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -38,8 +38,8 @@
|
|||||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
|
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
|
{% if role.startswith('manager') or role in ['standalone', 'receiver'] %}
|
||||||
{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
|
{% for r in ['manager', 'managersearch', 'managerhype', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
|
||||||
{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
|
{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
|
||||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
|
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
@@ -48,11 +48,11 @@
|
|||||||
|
|
||||||
{% if KAFKA_EXTERNAL_ACCESS %}
|
{% if KAFKA_EXTERNAL_ACCESS %}
|
||||||
{# Kafka external access only applies for Kafka nodes with the broker role. #}
|
{# Kafka external access only applies for Kafka nodes with the broker role. #}
|
||||||
{% if role in ['manager', 'managersearch', 'standalone', 'receiver'] and 'broker' in kafka_node_type %}
|
{% if role.startswith('manager') or role in ['standalone', 'receiver'] and 'broker' in kafka_node_type %}
|
||||||
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
|
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.external_kafka.portgroups.append('kafka_external_access') %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% endif %}
|
{% endif %}
|
||||||
|
|
||||||
{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
|
{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
|
||||||
|
|||||||
@@ -36,6 +36,7 @@ firewall:
|
|||||||
external_kafka: *hostgroupsettings
|
external_kafka: *hostgroupsettings
|
||||||
fleet: *hostgroupsettings
|
fleet: *hostgroupsettings
|
||||||
heavynode: *hostgroupsettings
|
heavynode: *hostgroupsettings
|
||||||
|
hypervisor: *hostgroupsettings
|
||||||
idh: *hostgroupsettings
|
idh: *hostgroupsettings
|
||||||
import: *hostgroupsettings
|
import: *hostgroupsettings
|
||||||
localhost: *ROhostgroupsettingsadv
|
localhost: *ROhostgroupsettingsadv
|
||||||
|
|||||||
125
salt/hypervisor/defaults.yaml
Normal file
125
salt/hypervisor/defaults.yaml
Normal file
@@ -0,0 +1,125 @@
|
|||||||
|
hypervisor:
|
||||||
|
model:
|
||||||
|
testModel:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 128
|
||||||
|
disk:
|
||||||
|
1: pci_0000_c7_00_0
|
||||||
|
2: pci_0000_c8_00_0
|
||||||
|
copper:
|
||||||
|
1: pci_0000_c4_00_0
|
||||||
|
2: pci_0000_c4_00_1
|
||||||
|
3: pci_0000_c4_00_2
|
||||||
|
4: pci_0000_c4_00_3
|
||||||
|
sfp:
|
||||||
|
5: pci_0000_02_00_0
|
||||||
|
6: pci_0000_02_00_1
|
||||||
|
7: pci_0000_41_00_0
|
||||||
|
8: pci_0000_41_00_1
|
||||||
|
SOSSNNV:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 256
|
||||||
|
disk:
|
||||||
|
1: pci_0000_42_00_0
|
||||||
|
2: pci_0000_43_00_0
|
||||||
|
3: pci_0000_44_00_0
|
||||||
|
4: pci_0000_45_00_0
|
||||||
|
copper:
|
||||||
|
sfp:
|
||||||
|
1: pci_0000_02_00_0
|
||||||
|
2: pci_0000_02_00_1
|
||||||
|
3: pci_0000_41_00_0
|
||||||
|
4: pci_0000_41_00_1
|
||||||
|
SOSSNNV-DE02:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 384
|
||||||
|
disk:
|
||||||
|
1: pci_0000_41_00_0
|
||||||
|
2: pci_0000_42_00_0
|
||||||
|
3: pci_0000_81_00_0
|
||||||
|
4: pci_0000_82_00_0
|
||||||
|
5: pci_0000_83_00_0
|
||||||
|
6: pci_0000_84_00_0
|
||||||
|
copper:
|
||||||
|
1: pci_0000_85_00_0
|
||||||
|
2: pci_0000_85_00_1
|
||||||
|
3: pci_0000_85_00_2
|
||||||
|
4: pci_0000_85_00_3
|
||||||
|
sfp:
|
||||||
|
5: pci_0000_c4_00_0
|
||||||
|
6: pci_0000_c4_00_1
|
||||||
|
7: pci_0000_c5_00_0
|
||||||
|
8: pci_0000_c5_00_1
|
||||||
|
9: pci_0000_c5_00_2
|
||||||
|
10: pci_0000_c5_00_3
|
||||||
|
SOSSN7200:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 256
|
||||||
|
copper:
|
||||||
|
1: pci_0000_03_00_0
|
||||||
|
2: pci_0000_03_00_1
|
||||||
|
3: pci_0000_03_00_2
|
||||||
|
4: pci_0000_03_00_3
|
||||||
|
sfp:
|
||||||
|
5: pci_0000_02_00_0
|
||||||
|
6: pci_0000_02_00_1
|
||||||
|
7: pci_0000_81_00_0
|
||||||
|
8: pci_0000_81_00_1
|
||||||
|
9: pci_0000_81_00_2
|
||||||
|
10: pci_0000_81_00_3
|
||||||
|
SOSSN7200-DE02:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 384
|
||||||
|
copper:
|
||||||
|
1: pci_0000_82_00_0
|
||||||
|
2: pci_0000_82_00_1
|
||||||
|
3: pci_0000_82_00_2
|
||||||
|
4: pci_0000_82_00_3
|
||||||
|
sfp:
|
||||||
|
5: pci_0000_c4_00_0
|
||||||
|
6: pci_0000_c4_00_1
|
||||||
|
7: pci_0000_c5_00_0
|
||||||
|
8: pci_0000_c5_00_1
|
||||||
|
9: pci_0000_c6_00_0
|
||||||
|
10: pci_0000_c6_00_1
|
||||||
|
11: pci_0000_c6_00_2
|
||||||
|
12: pci_0000_c6_00_3
|
||||||
|
SOS4000:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 256
|
||||||
|
copper:
|
||||||
|
1: pci_0000_03_00_0
|
||||||
|
2: pci_0000_03_00_1
|
||||||
|
3: pci_0000_03_00_2
|
||||||
|
4: pci_0000_03_00_3
|
||||||
|
sfp:
|
||||||
|
5: pci_0000_02_00_0
|
||||||
|
6: pci_0000_02_00_1
|
||||||
|
7: pci_0000_81_00_0
|
||||||
|
8: pci_0000_81_00_1
|
||||||
|
9: pci_0000_81_00_2
|
||||||
|
10: pci_0000_81_00_3
|
||||||
|
SOS5000-DE02:
|
||||||
|
hardware:
|
||||||
|
cpu: 128
|
||||||
|
memory: 384
|
||||||
|
copper:
|
||||||
|
1: pci_0000_82_00_0
|
||||||
|
2: pci_0000_82_00_1
|
||||||
|
3: pci_0000_82_00_2
|
||||||
|
4: pci_0000_82_00_3
|
||||||
|
sfp:
|
||||||
|
5: pci_0000_c4_00_0
|
||||||
|
6: pci_0000_c4_00_1
|
||||||
|
7: pci_0000_c5_00_0
|
||||||
|
8: pci_0000_c5_00_1
|
||||||
|
9: pci_0000_c6_00_0
|
||||||
|
10: pci_0000_c6_00_1
|
||||||
|
11: pci_0000_c6_00_2
|
||||||
|
12: pci_0000_c6_00_3
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user