Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2026-01-24 17:03:27 +01:00

Compare commits: 2.4.170-20 ... reyesj2/pa (685 commits)
.github/DISCUSSION_TEMPLATE/2-4.yml (vendored; 5 changed lines)

@@ -30,6 +30,11 @@ body:
   - 2.4.150
   - 2.4.160
   - 2.4.170
+  - 2.4.180
+  - 2.4.190
+  - 2.4.200
+  - 2.4.201
+  - 2.4.210
   - Other (please provide detail below)
 validations:
   required: true
.github/workflows/pythontest.yml (vendored; 2 changed lines)

@@ -4,7 +4,7 @@ on:
   pull_request:
     paths:
       - "salt/sensoroni/files/analyzers/**"
-      - "salt/manager/tools/sbin"
+      - "salt/manager/tools/sbin/**"

 jobs:
   build:
@@ -1,17 +1,17 @@
-### 2.4.170-20250812 ISO image released on 2025/08/12
+### 2.4.201-20260114 ISO image released on 2026/1/15

 ### Download and Verify

-2.4.170-20250812 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.170-20250812.iso
+2.4.201-20260114 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso

-MD5: 50ECAAD05736298452DECEAE074FA773
-SHA1: 1B1EB520DE61ECC4BF34E512DAFE307317D7666A
-SHA256: 87D176A48A58BAD1C2D57196F999BED23DE9B526226E3754F0C166C866CCDC1A
+MD5: 20E926E433203798512EF46E590C89B9
+SHA1: 779E4084A3E1A209B494493B8F5658508B6014FA
+SHA256: 3D10E7C885AEC5C5D4F4E50F9644FF9728E8C0A2E36EBB8C96B32569685A7C40

 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.170-20250812.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig

 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS

@@ -25,22 +25,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.

 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.170-20250812.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.201-20260114.iso.sig
 ```

 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.170-20250812.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.4.201-20260114.iso
 ```

 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.170-20250812.iso.sig securityonion-2.4.170-20250812.iso
+gpg --verify securityonion-2.4.201-20260114.iso.sig securityonion-2.4.201-20260114.iso
 ```

 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Fri 08 Aug 2025 06:24:56 PM EDT using RSA key ID FE507013
+gpg: Signature made Wed 14 Jan 2026 05:23:39 PM EST using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 ```
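Alongside the `gpg --verify` step above, the published SHA256 can be checked independently. A minimal Python sketch; the local filename is assumed to match the download, the hash is copied from the release notes above, and this complements rather than replaces the signature check:

```python
import hashlib

# Values taken from the release notes above; the local filename is an assumption.
ISO_PATH = "securityonion-2.4.201-20260114.iso"
EXPECTED_SHA256 = "3D10E7C885AEC5C5D4F4E50F9644FF9728E8C0A2E36EBB8C96B32569685A7C40"

def sha256sum(path, chunk_size=1 << 20):
    # Stream the file in 1 MiB chunks so a multi-GB ISO never sits in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

if sha256sum(ISO_PATH).upper() == EXPECTED_SHA256:
    print("SHA256 matches the published value")
else:
    print("SHA256 MISMATCH - do not install this image")
```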
pillar/ca/init.sls (new file; 2 lines)

@@ -0,0 +1,2 @@
ca:
  server:
@@ -1,5 +1,6 @@
 base:
   '*':
+    - ca
     - global.soc_global
     - global.adv_global
     - docker.soc_docker

@@ -43,8 +44,6 @@ base:
     - secrets
     - manager.soc_manager
     - manager.adv_manager
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash

@@ -117,8 +116,6 @@ base:
     - elastalert.adv_elastalert
     - manager.soc_manager
     - manager.adv_manager
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - soc.soc_soc
     - soc.adv_soc
     - kibana.soc_kibana

@@ -158,8 +155,6 @@ base:
 {% endif %}
     - secrets
     - healthcheck.standalone
-    - idstools.soc_idstools
-    - idstools.adv_idstools
     - kratos.soc_kratos
     - kratos.adv_kratos
     - hydra.soc_hydra

@@ -262,6 +257,9 @@ base:
     - minions.adv_{{ grains.id }}
     - kafka.nodes
     - kafka.soc_kafka
+    - stig.soc_stig
+    - elasticfleet.soc_elasticfleet
+    - elasticfleet.adv_elasticfleet

   '*_import':
     - node_data.ips

@@ -319,10 +317,12 @@ base:
     - elasticfleet.adv_elasticfleet
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig

   '*_hypervisor':
     - minions.{{ grains.id }}
     - minions.adv_{{ grains.id }}
     - stig.soc_stig

   '*_desktop':
     - minions.{{ grains.id }}
salt/_modules/hypervisor.py (new file; 91 lines)

@@ -0,0 +1,91 @@
#!/opt/saltstack/salt/bin/python3

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
#   "You may not move, change, disable, or circumvent the license key functionality
#   in the software, and you may not remove or obscure any functionality in the
#   software that is protected by the license key."

"""
Salt execution module for hypervisor operations.

This module provides functions for managing hypervisor configurations,
including VM file management.
"""

import json
import logging
import os

log = logging.getLogger(__name__)

__virtualname__ = 'hypervisor'


def __virtual__():
    """
    Only load this module if we're on a system that can manage hypervisors.
    """
    return __virtualname__


def remove_vm_from_vms_file(vms_file_path, vm_hostname, vm_role):
    """
    Remove a VM entry from the hypervisorVMs file.

    Args:
        vms_file_path (str): Path to the hypervisorVMs file
        vm_hostname (str): Hostname of the VM to remove (without role suffix)
        vm_role (str): Role of the VM

    Returns:
        dict: Result dictionary with success status and message

    CLI Example:
        salt '*' hypervisor.remove_vm_from_vms_file /opt/so/saltstack/local/salt/hypervisor/hosts/hypervisor1VMs node1 nsm
    """
    try:
        # Check if file exists
        if not os.path.exists(vms_file_path):
            msg = f"VMs file not found: {vms_file_path}"
            log.error(msg)
            return {'result': False, 'comment': msg}

        # Read current VMs
        with open(vms_file_path, 'r') as f:
            content = f.read().strip()
            vms = json.loads(content) if content else []

        # Find and remove the VM entry
        original_count = len(vms)
        vms = [vm for vm in vms if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]

        if len(vms) < original_count:
            # VM was found and removed, write back to file
            with open(vms_file_path, 'w') as f:
                json.dump(vms, f, indent=2)

            # Set socore:socore ownership (939:939)
            os.chown(vms_file_path, 939, 939)

            msg = f"Removed VM {vm_hostname}_{vm_role} from {vms_file_path}"
            log.info(msg)
            return {'result': True, 'comment': msg}
        else:
            msg = f"VM {vm_hostname}_{vm_role} not found in {vms_file_path}"
            log.warning(msg)
            return {'result': False, 'comment': msg}

    except json.JSONDecodeError as e:
        msg = f"Failed to parse JSON in {vms_file_path}: {str(e)}"
        log.error(msg)
        return {'result': False, 'comment': msg}
    except Exception as e:
        msg = f"Failed to remove VM {vm_hostname}_{vm_role} from {vms_file_path}: {str(e)}"
        log.error(msg)
        return {'result': False, 'comment': msg}
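The filter above implies the hypervisorVMs file holds a JSON list of objects with `hostname` and `role` keys; that shape is inferred from the list comprehension, not documented elsewhere in this diff. A small self-contained sketch of the assumed data shape and removal logic:

```python
import json

# Hypothetical contents of a hypervisorVMs file; the [{hostname, role}, ...]
# shape is inferred from the filter in remove_vm_from_vms_file above.
vms = json.loads('[{"hostname": "node1", "role": "nsm"}, '
                 '{"hostname": "node2", "role": "sensor"}]')

vm_hostname, vm_role = "node1", "nsm"
# Keep every entry except the one matching both hostname and role.
vms = [vm for vm in vms
       if not (vm.get('hostname') == vm_hostname and vm.get('role') == vm_role)]

print(json.dumps(vms, indent=2))  # node1/nsm entry removed, node2 remains
```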
@@ -7,12 +7,14 @@

 """
 Salt module for managing QCOW2 image configurations and VM hardware settings. This module provides functions
-for modifying network configurations within QCOW2 images and adjusting virtual machine hardware settings.
-It serves as a Salt interface to the so-qcow2-modify-network and so-kvm-modify-hardware scripts.
+for modifying network configurations within QCOW2 images, adjusting virtual machine hardware settings, and
+creating virtual storage volumes. It serves as a Salt interface to the so-qcow2-modify-network,
+so-kvm-modify-hardware, and so-kvm-create-volume scripts.

-The module offers two main capabilities:
+The module offers three main capabilities:
 1. Network Configuration: Modify network settings (DHCP/static IP) within QCOW2 images
 2. Hardware Configuration: Adjust VM hardware settings (CPU, memory, PCI passthrough)
+3. Volume Management: Create and attach virtual storage volumes for NSM data

 This module is intended to work with Security Onion's virtualization infrastructure and is typically
 used in conjunction with salt-cloud for VM provisioning and management.

@@ -244,3 +246,90 @@ def modify_hardware_config(vm_name, cpu=None, memory=None, pci=None, start=False
     except Exception as e:
         log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
         raise
+
+def create_volume_config(vm_name, size_gb, start=False):
+    '''
+    Usage:
+        salt '*' qcow2.create_volume_config vm_name=<name> size_gb=<size> [start=<bool>]
+
+    Options:
+        vm_name
+            Name of the virtual machine to attach the volume to
+        size_gb
+            Volume size in GB (positive integer)
+            This determines the capacity of the virtual storage volume
+        start
+            Boolean flag to start the VM after volume creation
+            Optional - defaults to False
+
+    Examples:
+        1. **Create 500GB Volume:**
+           ```bash
+           salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=500
+           ```
+           This creates a 500GB virtual volume for NSM storage
+
+        2. **Create 1TB Volume and Start VM:**
+           ```bash
+           salt '*' qcow2.create_volume_config vm_name='sensor1_sensor' size_gb=1000 start=True
+           ```
+           This creates a 1TB volume and starts the VM after attachment
+
+    Notes:
+        - VM must be stopped before volume creation
+        - Volume is created as a qcow2 image and attached to the VM
+        - This is an alternative to disk passthrough via modify_hardware_config
+        - Volume is automatically attached to the VM's libvirt configuration
+        - Requires so-kvm-create-volume script to be installed
+        - Volume files are stored in the hypervisor's VM storage directory
+
+    Description:
+        This function creates and attaches a virtual storage volume to a KVM virtual machine
+        using the so-kvm-create-volume script. It creates a qcow2 disk image of the specified
+        size and attaches it to the VM for NSM (Network Security Monitoring) storage purposes.
+        This provides an alternative to physical disk passthrough, allowing flexible storage
+        allocation without requiring dedicated hardware. The VM can optionally be started
+        after the volume is successfully created and attached.
+
+    Exit Codes:
+        0: Success
+        1: Invalid parameters
+        2: VM state error (running when should be stopped)
+        3: Volume creation error
+        4: System command error
+        255: Unexpected error
+
+    Logging:
+        - All operations are logged to the salt minion log
+        - Log entries are prefixed with 'qcow2 module:'
+        - Volume creation and attachment operations are logged
+        - Errors include detailed messages and stack traces
+        - Final status of volume creation is logged
+    '''
+
+    # Validate size_gb parameter
+    if not isinstance(size_gb, int) or size_gb <= 0:
+        raise ValueError('size_gb must be a positive integer.')
+
+    cmd = ['/usr/sbin/so-kvm-create-volume', '-v', vm_name, '-s', str(size_gb)]
+
+    if start:
+        cmd.append('-S')
+
+    log.info('qcow2 module: Executing command: {}'.format(' '.join(shlex.quote(arg) for arg in cmd)))
+
+    try:
+        result = subprocess.run(cmd, capture_output=True, text=True, check=False)
+        ret = {
+            'retcode': result.returncode,
+            'stdout': result.stdout,
+            'stderr': result.stderr
+        }
+        if result.returncode != 0:
+            log.error('qcow2 module: Script execution failed with return code {}: {}'.format(result.returncode, result.stderr))
+        else:
+            log.info('qcow2 module: Script executed successfully.')
+        return ret
+    except Exception as e:
+        log.error('qcow2 module: An error occurred while executing the script: {}'.format(e))
+        raise
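Besides the `salt '*' qcow2...` CLI form shown in the docstring, execution modules like this are also callable from Python on the salt master. A sketch using salt.client.LocalClient; the minion id is hypothetical:

```python
import salt.client

# Must run on the salt master; 'hypervisor1_hypervisor' is an assumed minion id.
local = salt.client.LocalClient()
result = local.cmd(
    'hypervisor1_hypervisor',
    'qcow2.create_volume_config',
    kwarg={'vm_name': 'sensor1_sensor', 'size_gb': 500, 'start': False},
)
# Each minion returns the dict built by the module:
# {'hypervisor1_hypervisor': {'retcode': 0, 'stdout': '...', 'stderr': ''}}
print(result)
```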
@@ -172,7 +172,15 @@ MANAGER_HOSTNAME = socket.gethostname()

 def _download_image():
     """
-    Download and validate the Oracle Linux KVM image.
+    Download and validate the Oracle Linux KVM image with retry logic and progress monitoring.
+
+    Features:
+    - Detects stalled downloads (no progress for 30 seconds)
+    - Retries up to 3 times on failure
+    - Connection timeout of 30 seconds
+    - Read timeout of 60 seconds
+    - Cleans up partial downloads on failure

     Returns:
         bool: True if successful or file exists with valid checksum, False on error
     """

@@ -185,45 +193,107 @@ def _download_image():
         os.unlink(IMAGE_PATH)

     log.info("Starting image download process")

-    try:
-        # Download file
-        log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH)
-        response = requests.get(IMAGE_URL, stream=True)
-        response.raise_for_status()
-
-        # Get total file size for progress tracking
-        total_size = int(response.headers.get('content-length', 0))
-        downloaded_size = 0
-        last_log_time = 0
-
-        # Save file with progress logging
-        with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f:
-            for chunk in response.iter_content(chunk_size=8192):
-                f.write(chunk)
-                downloaded_size += len(chunk)
-
-                # Log progress every second
-                current_time = time.time()
-                if current_time - last_log_time >= 1:
-                    progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0
-                    log.info("Progress - %.1f%% (%d/%d bytes)",
-                             progress, downloaded_size, total_size)
-                    last_log_time = current_time
-
-        # Validate downloaded file
-        if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256):
-            os.unlink(IMAGE_PATH)
-            return False
-
-        log.info("Successfully downloaded and validated Oracle Linux KVM image")
-        return True
-
-    except Exception as e:
-        log.error("Error downloading hypervisor image: %s", str(e))
-        if os.path.exists(IMAGE_PATH):
-            os.unlink(IMAGE_PATH)
-        return False
+    # Retry configuration
+    max_attempts = 3
+    retry_delay = 5  # seconds to wait between retry attempts
+    stall_timeout = 30  # seconds without progress before considering download stalled
+    connection_timeout = 30  # seconds to establish connection
+    read_timeout = 60  # seconds to wait for data chunks
+
+    for attempt in range(1, max_attempts + 1):
+        log.info("Download attempt %d of %d", attempt, max_attempts)
+
+        try:
+            # Download file with timeouts
+            log.info("Downloading Oracle Linux KVM image from %s to %s", IMAGE_URL, IMAGE_PATH)
+            response = requests.get(
+                IMAGE_URL,
+                stream=True,
+                timeout=(connection_timeout, read_timeout)
+            )
+            response.raise_for_status()
+
+            # Get total file size for progress tracking
+            total_size = int(response.headers.get('content-length', 0))
+            downloaded_size = 0
+            last_log_time = 0
+            last_progress_time = time.time()
+            last_downloaded_size = 0
+
+            # Save file with progress logging and stall detection
+            with salt.utils.files.fopen(IMAGE_PATH, 'wb') as f:
+                for chunk in response.iter_content(chunk_size=8192):
+                    if chunk:  # filter out keep-alive new chunks
+                        f.write(chunk)
+                        downloaded_size += len(chunk)
+                        current_time = time.time()
+
+                        # Check for stalled download
+                        if downloaded_size > last_downloaded_size:
+                            # Progress made, reset stall timer
+                            last_progress_time = current_time
+                            last_downloaded_size = downloaded_size
+                        elif current_time - last_progress_time > stall_timeout:
+                            # No progress for stall_timeout seconds
+                            raise Exception(
+                                f"Download stalled: no progress for {stall_timeout} seconds "
+                                f"at {downloaded_size}/{total_size} bytes"
+                            )
+
+                        # Log progress every second
+                        if current_time - last_log_time >= 1:
+                            progress = (downloaded_size / total_size) * 100 if total_size > 0 else 0
+                            log.info("Progress - %.1f%% (%d/%d bytes)",
+                                     progress, downloaded_size, total_size)
+                            last_log_time = current_time
+
+            # Validate downloaded file
+            log.info("Download complete, validating checksum...")
+            if not _validate_image_checksum(IMAGE_PATH, IMAGE_SHA256):
+                log.error("Checksum validation failed on attempt %d", attempt)
+                os.unlink(IMAGE_PATH)
+                if attempt < max_attempts:
+                    log.info("Will retry download...")
+                    continue
+                else:
+                    log.error("All download attempts failed due to checksum mismatch")
+                    return False
+
+            log.info("Successfully downloaded and validated Oracle Linux KVM image")
+            return True
+
+        except requests.exceptions.Timeout as e:
+            log.error("Download attempt %d failed: Timeout - %s", attempt, str(e))
+            if os.path.exists(IMAGE_PATH):
+                os.unlink(IMAGE_PATH)
+            if attempt < max_attempts:
+                log.info("Will retry download in %d seconds...", retry_delay)
+                time.sleep(retry_delay)
+            else:
+                log.error("All download attempts failed due to timeout")
+
+        except requests.exceptions.RequestException as e:
+            log.error("Download attempt %d failed: Network error - %s", attempt, str(e))
+            if os.path.exists(IMAGE_PATH):
+                os.unlink(IMAGE_PATH)
+            if attempt < max_attempts:
+                log.info("Will retry download in %d seconds...", retry_delay)
+                time.sleep(retry_delay)
+            else:
+                log.error("All download attempts failed due to network errors")
+
+        except Exception as e:
+            log.error("Download attempt %d failed: %s", attempt, str(e))
+            if os.path.exists(IMAGE_PATH):
+                os.unlink(IMAGE_PATH)
+            if attempt < max_attempts:
+                log.info("Will retry download in %d seconds...", retry_delay)
+                time.sleep(retry_delay)
+            else:
+                log.error("All download attempts failed")
+
+    return False

 def _check_ssh_keys_exist():
     """
@@ -419,25 +489,28 @@ def _ensure_hypervisor_host_dir(minion_id: str = None):
         log.error(f"Error creating hypervisor host directory: {str(e)}")
         return False

-def _apply_dyanno_hypervisor_state():
+def _apply_dyanno_hypervisor_state(status):
     """
     Apply the soc.dyanno.hypervisor state on the salt master.

     This function applies the soc.dyanno.hypervisor state on the salt master
     to update the hypervisor annotation and ensure all hypervisor host directories exist.

+    Args:
+        status: Status passed to the hypervisor annotation state
+
     Returns:
         bool: True if state was applied successfully, False otherwise
     """
     try:
-        log.info("Applying soc.dyanno.hypervisor state on salt master")
+        log.info(f"Applying soc.dyanno.hypervisor state on salt master with status: {status}")

         # Initialize the LocalClient
         local = salt.client.LocalClient()

         # Target the salt master to apply the soc.dyanno.hypervisor state
         target = MANAGER_HOSTNAME + '_*'
-        state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', "pillar={'baseDomain': {'status': 'PreInit'}}", 'concurrent=True'], tgt_type='glob')
+        state_result = local.cmd(target, 'state.apply', ['soc.dyanno.hypervisor', f"pillar={{'baseDomain': {{'status': '{status}'}}}}", 'concurrent=True'], tgt_type='glob')
         log.debug(f"state_result: {state_result}")
         # Check if state was applied successfully
         if state_result:

@@ -454,17 +527,17 @@ def _apply_dyanno_hypervisor_state():
                 success = False

             if success:
-                log.info("Successfully applied soc.dyanno.hypervisor state")
+                log.info(f"Successfully applied soc.dyanno.hypervisor state with status: {status}")
                 return True
             else:
-                log.error("Failed to apply soc.dyanno.hypervisor state")
+                log.error(f"Failed to apply soc.dyanno.hypervisor state with status: {status}")
                 return False
         else:
-            log.error("No response from salt master when applying soc.dyanno.hypervisor state")
+            log.error(f"No response from salt master when applying soc.dyanno.hypervisor state with status: {status}")
            return False

     except Exception as e:
-        log.error(f"Error applying soc.dyanno.hypervisor state: {str(e)}")
+        log.error(f"Error applying soc.dyanno.hypervisor state with status: {status}: {str(e)}")
         return False

 def _apply_cloud_config_state():

@@ -598,11 +671,6 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
         log.warning("Failed to apply salt.cloud.config state, continuing with setup")
         # We don't return an error here as we want to continue with the setup process

-    # Apply the soc.dyanno.hypervisor state on the salt master
-    if not _apply_dyanno_hypervisor_state():
-        log.warning("Failed to apply soc.dyanno.hypervisor state, continuing with setup")
-        # We don't return an error here as we want to continue with the setup process
-
     log.info("Starting setup_environment in setup_hypervisor runner")

     # Check if environment is already set up

@@ -616,9 +684,12 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
     # Handle image setup if needed
     if not image_valid:
+        _apply_dyanno_hypervisor_state('ImageDownloadStart')
         log.info("Starting image download/validation process")
         if not _download_image():
             log.error("Image download failed")
+            # Update hypervisor annotation with failure status
+            _apply_dyanno_hypervisor_state('ImageDownloadFailed')
             return {
                 'success': False,
                 'error': 'Image download failed',

@@ -631,6 +702,8 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
     log.info("Setting up SSH keys")
     if not _setup_ssh_keys():
         log.error("SSH key setup failed")
+        # Update hypervisor annotation with failure status
+        _apply_dyanno_hypervisor_state('SSHKeySetupFailed')
         return {
             'success': False,
             'error': 'SSH key setup failed',

@@ -655,6 +728,12 @@ def setup_environment(vm_name: str = 'sool9', disk_size: str = '220G', minion_id
     success = vm_result.get('success', False)
     log.info("Setup environment completed with status: %s", "SUCCESS" if success else "FAILED")

+    # Update hypervisor annotation with success status
+    if success:
+        _apply_dyanno_hypervisor_state('PreInit')
+    else:
+        _apply_dyanno_hypervisor_state('SetupFailed')
+
     # If setup was successful and we have a minion_id, run highstate
     if success and minion_id:
         log.info("Running highstate on hypervisor %s", minion_id)
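The status is spliced into the pillar override as a literal dict string handed to state.apply; a quick check of how the f-string renders, using one of the status values passed above:

```python
# Doubled braces escape the literal { } in an f-string; only {status} interpolates.
status = 'ImageDownloadStart'
arg = f"pillar={{'baseDomain': {{'status': '{status}'}}}}"
print(arg)  # pillar={'baseDomain': {'status': 'ImageDownloadStart'}}
```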
@@ -15,11 +15,7 @@
   'salt.minion-check',
   'sensoroni',
   'salt.lasthighstate',
   'salt.minion'
 ] %}

 {% set ssl_states = [
   'ssl',
   'salt.minion',
   'telegraf',
   'firewall',
   'schedule',

@@ -28,7 +24,7 @@

 {% set manager_states = [
   'salt.master',
-  'ca',
+  'ca.server',
   'registry',
   'manager',
   'nginx',

@@ -38,8 +34,6 @@
   'hydra',
   'elasticfleet',
   'elastic-fleet-package-registry',
-  'idstools',
-  'suricata.manager',
   'utility'
 ] %}

@@ -77,28 +71,24 @@
 {# Map role-specific states #}
 {% set role_states = {
   'so-eval': (
     ssl_states +
     manager_states +
     sensor_states +
-    elastic_stack_states | reject('equalto', 'logstash') | list
+    elastic_stack_states | reject('equalto', 'logstash') | list +
+    ['logstash.ssl']
   ),
   'so-heavynode': (
     ssl_states +
     sensor_states +
     ['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
   ),
   'so-idh': (
     ssl_states +
     ['idh']
   ),
   'so-import': (
     ssl_states +
     manager_states +
     sensor_states | reject('equalto', 'strelka') | reject('equalto', 'healthcheck') | list +
-    ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'strelka.manager']
+    ['elasticsearch', 'elasticsearch.auth', 'kibana', 'kibana.secrets', 'logstash.ssl', 'strelka.manager']
   ),
   'so-manager': (
     ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
     stig_states +

@@ -106,7 +96,6 @@
     elastic_stack_states
   ),
   'so-managerhype': (
     ssl_states +
     manager_states +
     ['salt.cloud', 'strelka.manager', 'hypervisor', 'libvirt'] +
     stig_states +

@@ -114,7 +103,6 @@
     elastic_stack_states
   ),
   'so-managersearch': (
     ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users', 'strelka.manager'] +
     stig_states +

@@ -122,12 +110,10 @@
     elastic_stack_states
   ),
   'so-searchnode': (
     ssl_states +
     ['kafka.ca', 'kafka.ssl', 'elasticsearch', 'logstash', 'nginx'] +
     stig_states
   ),
   'so-standalone': (
     ssl_states +
     manager_states +
     ['salt.cloud', 'libvirt.packages', 'libvirt.ssh.users'] +
     sensor_states +

@@ -136,28 +122,24 @@
     elastic_stack_states
   ),
   'so-sensor': (
     ssl_states +
     sensor_states +
     ['nginx'] +
     stig_states
   ),
   'so-fleet': (
     ssl_states +
     stig_states +
     ['logstash', 'nginx', 'healthcheck', 'elasticfleet']
   ),
   'so-receiver': (
     ssl_states +
     kafka_states +
     stig_states +
     ['logstash', 'redis']
   ),
   'so-hypervisor': (
     ssl_states +
     stig_states +
     ['hypervisor', 'libvirt']
   ),
   'so-desktop': (
     ['ssl', 'docker_clean', 'telegraf'] +
     stig_states
   )
 } %}
@@ -1,4 +1,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
+{% set PCAP_BPF_STATUS = 0 %}
+{% set STENO_BPF_COMPILED = "" %}

 {% if GLOBALS.pcap_engine == "TRANSITION" %}
 {% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
 {% else %}

@@ -8,3 +11,11 @@
 {{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
 {% set PCAPBPF = BPFMERGED.pcap %}
 {% endif %}
+
+{% if PCAPBPF %}
+  {% set PCAP_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ PCAPBPF|join(" "), cwd='/root') %}
+  {% if PCAP_BPF_CALC['retcode'] == 0 %}
+    {% set PCAP_BPF_STATUS = 1 %}
+    {% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
+  {% endif %}
+{% endif %}
@@ -1,11 +1,11 @@
 bpf:
   pcap:
-    description: List of BPF filters to apply to Stenographer.
+    description: List of BPF filters to apply to the PCAP engine.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html
   suricata:
-    description: List of BPF filters to apply to Suricata.
+    description: List of BPF filters to apply to Suricata. This will apply to alerts and, if enabled, to metadata and PCAP logs generated by Suricata.
     multiline: True
     forcedType: "[]string"
     helpLink: bpf.html
@@ -1,7 +1,16 @@
+{% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
 {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
+{% set SURICATA_BPF_STATUS = 0 %}
 {% import 'bpf/macros.jinja' as MACROS %}

 {{ MACROS.remove_comments(BPFMERGED, 'suricata') }}

 {% set SURICATABPF = BPFMERGED.suricata %}
+
+{% if SURICATABPF %}
+  {% set SURICATA_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ SURICATABPF|join(" "), cwd='/root') %}
+  {% if SURICATA_BPF_CALC['retcode'] == 0 %}
+    {% set SURICATA_BPF_STATUS = 1 %}
+  {% endif %}
+{% endif %}
@@ -1,7 +1,16 @@
+{% from 'vars/globals.map.jinja' import GLOBALS %}
 {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
 {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
+{% set ZEEK_BPF_STATUS = 0 %}
 {% import 'bpf/macros.jinja' as MACROS %}

 {{ MACROS.remove_comments(BPFMERGED, 'zeek') }}

 {% set ZEEKBPF = BPFMERGED.zeek %}
+
+{% if ZEEKBPF %}
+  {% set ZEEK_BPF_CALC = salt['cmd.run_all']('/usr/sbin/so-bpf-compile ' ~ GLOBALS.sensor.interface ~ ' ' ~ ZEEKBPF|join(" "), cwd='/root') %}
+  {% if ZEEK_BPF_CALC['retcode'] == 0 %}
+    {% set ZEEK_BPF_STATUS = 1 %}
+  {% endif %}
+{% endif %}
@@ -1,4 +0,0 @@ (file removed)
-pki_issued_certs:
-  file.directory:
-    - name: /etc/pki/issued_certs
-    - makedirs: True
@@ -3,70 +3,10 @@
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-{% from 'allowed_states.map.jinja' import allowed_states %}
-{% if sls in allowed_states %}
 {% from 'vars/globals.map.jinja' import GLOBALS %}

 include:
-  - ca.dirs
-
-/etc/salt/minion.d/signing_policies.conf:
-  file.managed:
-    - source: salt://ca/files/signing_policies.conf
-
-pki_private_key:
-  x509.private_key_managed:
-    - name: /etc/pki/ca.key
-    - keysize: 4096
-    - passphrase:
-    - backup: True
-    {% if salt['file.file_exists']('/etc/pki/ca.key') -%}
-    - prereq:
-      - x509: /etc/pki/ca.crt
-    {%- endif %}
-
-pki_public_ca_crt:
-  x509.certificate_managed:
-    - name: /etc/pki/ca.crt
-    - signing_private_key: /etc/pki/ca.key
-    - CN: {{ GLOBALS.manager }}
-    - C: US
-    - ST: Utah
-    - L: Salt Lake City
-    - basicConstraints: "critical CA:true"
-    - keyUsage: "critical cRLSign, keyCertSign"
-    - extendedkeyUsage: "serverAuth, clientAuth"
-    - subjectKeyIdentifier: hash
-    - authorityKeyIdentifier: keyid:always, issuer
-    - days_valid: 3650
-    - days_remaining: 0
-    - backup: True
-    - replace: False
-    - require:
-      - sls: ca.dirs
-    - timeout: 30
-    - retry:
-        attempts: 5
-        interval: 30
-
-mine_update_ca_crt:
-  module.run:
-    - mine.update: []
-    - onchanges:
-      - x509: pki_public_ca_crt
-
-cakeyperms:
-  file.managed:
-    - replace: False
-    - name: /etc/pki/ca.key
-    - mode: 640
-    - group: 939
-
-{% else %}
-
-{{sls}}_state_not_allowed:
-  test.fail_without_changes:
-    - name: {{sls}}_state_not_allowed
-
+{% if GLOBALS.is_manager %}
+  - ca.server
 {% endif %}
+  - ca.trustca
salt/ca/map.jinja (new file; 3 lines)

@@ -0,0 +1,3 @@
{% set CA = {
    'server': pillar.ca.server
}%}
@@ -1,7 +1,35 @@
-pki_private_key:
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% set setup_running = salt['cmd.retcode']('pgrep -x so-setup') == 0 %}
+
+{% if setup_running %}
+
+include:
+  - ssl.remove
+
+remove_pki_private_key:
+  file.absent:
+    - name: /etc/pki/ca.key

-pki_public_ca_crt:
+remove_pki_public_ca_crt:
+  file.absent:
+    - name: /etc/pki/ca.crt
+
+remove_trusttheca:
+  file.absent:
+    - name: /etc/pki/tls/certs/intca.crt
+
+remove_pki_public_ca_crt_symlink:
+  file.absent:
+    - name: /opt/so/saltstack/local/salt/ca/files/ca.crt
+
+{% else %}
+
+so-setup_not_running:
+  test.show_notification:
+    - text: "This state is reserved for usage during so-setup."
+
+{% endif %}
salt/ca/server.sls (new file; 63 lines)

@@ -0,0 +1,63 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

pki_private_key:
  x509.private_key_managed:
    - name: /etc/pki/ca.key
    - keysize: 4096
    - passphrase:
    - backup: True
    {% if salt['file.file_exists']('/etc/pki/ca.key') -%}
    - prereq:
      - x509: /etc/pki/ca.crt
    {%- endif %}

pki_public_ca_crt:
  x509.certificate_managed:
    - name: /etc/pki/ca.crt
    - signing_private_key: /etc/pki/ca.key
    - CN: {{ GLOBALS.manager }}
    - C: US
    - ST: Utah
    - L: Salt Lake City
    - basicConstraints: "critical CA:true"
    - keyUsage: "critical cRLSign, keyCertSign"
    - extendedkeyUsage: "serverAuth, clientAuth"
    - subjectKeyIdentifier: hash
    - authorityKeyIdentifier: keyid:always, issuer
    - days_valid: 3650
    - days_remaining: 7
    - backup: True
    - replace: False
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

pki_public_ca_crt_symlink:
  file.symlink:
    - name: /opt/so/saltstack/local/salt/ca/files/ca.crt
    - target: /etc/pki/ca.crt
    - require:
      - x509: pki_public_ca_crt

cakeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/ca.key
    - mode: 640
    - group: 939

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
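With days_valid: 3650 and days_remaining: 7, Salt re-issues the CA certificate once it is within a week of expiry. An independent way to inspect the current margin from Python, assuming the third-party cryptography package (not_valid_after_utc requires cryptography >= 42); this is a diagnostic sketch, not part of the change:

```python
from datetime import datetime, timezone
from cryptography import x509  # third-party 'cryptography' package

def ca_days_remaining(path='/etc/pki/ca.crt'):
    with open(path, 'rb') as f:
        cert = x509.load_pem_x509_certificate(f.read())
    # Days until the cert's notAfter; Salt renews once this drops below 7.
    return (cert.not_valid_after_utc - datetime.now(timezone.utc)).days

print(ca_days_remaining())
```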
salt/idstools/tools/sbin/so-idstools-start → salt/ca/signing_policy.sls (executable file → normal file; 15 changed lines)

@@ -1,12 +1,15 @@
-#!/bin/bash
-
 # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
 # or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
 # https://securityonion.net/license; you may not use this file except in compliance with the
 # Elastic License 2.0.

-. /usr/sbin/so-common
-
-/usr/sbin/so-start idstools $1
+# when the salt-minion signs the cert, a copy is stored here
+issued_certs_copypath:
+  file.directory:
+    - name: /etc/pki/issued_certs
+    - makedirs: True
+
+signing_policy:
+  file.managed:
+    - name: /etc/salt/minion.d/signing_policies.conf
+    - source: salt://ca/files/signing_policies.conf
salt/ca/trustca.sls (new file; 26 lines)

@@ -0,0 +1,26 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}

include:
  - docker

# Trust the CA
trusttheca:
  file.managed:
    - name: /etc/pki/tls/certs/intca.crt
    - source: salt://ca/files/ca.crt
    - watch_in:
      - service: docker_running
    - show_changes: False
    - makedirs: True

{% if GLOBALS.os_family == 'Debian' %}
symlinkca:
  file.symlink:
    - target: /etc/pki/tls/certs/intca.crt
    - name: /etc/ssl/certs/intca.crt
{% endif %}
salt/common/grains.sls (new file; 21 lines)

@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% set nsm_exists = salt['file.directory_exists']('/nsm') %}
{% if nsm_exists %}
{% set nsm_total = salt['cmd.shell']('df -BG /nsm | tail -1 | awk \'{print $2}\'') %}

nsm_total:
  grains.present:
    - name: nsm_total
    - value: {{ nsm_total }}

{% else %}

nsm_missing:
  test.succeed_without_changes:
    - name: /nsm does not exist, skipping grain assignment

{% endif %}
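The cmd.shell pipeline yields the total-size column of df in gigabyte blocks (for example 220G). For reference, the same extraction done directly in Python rather than via tail and awk:

```python
import subprocess

def nsm_total():
    # Equivalent of: df -BG /nsm | tail -1 | awk '{print $2}'
    out = subprocess.run(['df', '-BG', '/nsm'],
                         capture_output=True, text=True, check=True).stdout
    # Last line is the filesystem row; field 2 is the total size, e.g. '220G'.
    return out.strip().splitlines()[-1].split()[1]

print(nsm_total())
```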
@@ -4,6 +4,7 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}

 include:
+  - common.grains
   - common.packages
 {% if GLOBALS.role in GLOBALS.manager_roles %}
   - manager.elasticsearch # needed for elastic_curl_config state

@@ -176,7 +177,7 @@ so-status_script:
     - source: salt://common/tools/sbin/so-status
     - mode: 755

-{% if GLOBALS.role in GLOBALS.sensor_roles %}
+{% if GLOBALS.is_sensor %}
 # Add sensor cleanup
 so-sensor-clean:
   cron.present:
@@ -29,9 +29,26 @@ fi

 interface="$1"
 shift
-tcpdump -i $interface -ddd $@ | tail -n+2 |
-while read line; do
+
+# Capture tcpdump output and exit code
+tcpdump_output=$(tcpdump -i "$interface" -ddd "$@" 2>&1)
+tcpdump_exit=$?
+
+if [ $tcpdump_exit -ne 0 ]; then
+  echo "$tcpdump_output" >&2
+  exit $tcpdump_exit
+fi
+
+# Process the output, skipping the first line
+echo "$tcpdump_output" | tail -n+2 | while read -r line; do
   cols=( $line )
-  printf "%04x%02x%02x%08x" ${cols[0]} ${cols[1]} ${cols[2]} ${cols[3]}
+  printf "%04x%02x%02x%08x" "${cols[0]}" "${cols[1]}" "${cols[2]}" "${cols[3]}"
 done
+
+# Check if the pipeline succeeded
+if [ "${PIPESTATUS[0]}" -ne 0 ]; then
+  exit 1
+fi
+
 echo ""
 exit 0
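The printf format concatenates each classic BPF instruction that tcpdump -ddd emits (opcode, jump-true, jump-false, k) as 2+1+1+4 big-endian bytes in hex. The equivalent packing in Python, with an illustrative "ret #0" instruction as sample input:

```python
import struct

def pack_bpf_insn(opcode, jt, jf, k):
    # Mirrors: printf "%04x%02x%02x%08x" opcode jt jf k
    # >HBBI = big-endian u16 opcode, u8 jt, u8 jf, u32 k
    return struct.pack('>HBBI', opcode, jt, jf, k).hex()

# Classic BPF "ret #0" (drop everything): opcode 0x06, k = 0
print(pack_bpf_insn(0x06, 0, 0, 0))  # -> 0006000000000000
```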
@@ -220,12 +220,22 @@ compare_es_versions() {
 }

 copy_new_files() {
+  # Define files to exclude from deletion (relative to their respective base directories)
+  local EXCLUDE_FILES=(
+    "salt/hypervisor/soc_hypervisor.yaml"
+  )
+
+  # Build rsync exclude arguments
+  local EXCLUDE_ARGS=()
+  for file in "${EXCLUDE_FILES[@]}"; do
+    EXCLUDE_ARGS+=(--exclude="$file")
+  done
+
   # Copy new files over to the salt dir
   cd $UPDATE_DIR
-  rsync -a salt $DEFAULT_SALT_DIR/ --delete
-  rsync -a pillar $DEFAULT_SALT_DIR/ --delete
+  rsync -a salt $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
+  rsync -a pillar $DEFAULT_SALT_DIR/ --delete "${EXCLUDE_ARGS[@]}"
   chown -R socore:socore $DEFAULT_SALT_DIR/
   chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
   cd /tmp
 }

@@ -385,7 +395,7 @@ is_manager_node() {
 }

 is_sensor_node() {
-  # Check to see if this is a sensor (forward) node
+  # Check to see if this is a sensor node
   is_single_node_grid && return 0
   grep "role: so-" /etc/salt/grains | grep -E "sensor|heavynode" &> /dev/null
 }

@@ -441,8 +451,7 @@ lookup_grain() {

 lookup_role() {
   id=$(lookup_grain id)
-  pieces=($(echo $id | tr '_' ' '))
-  echo ${pieces[1]}
+  echo "${id##*_}"
 }

 is_feature_enabled() {
@@ -545,21 +554,39 @@ run_check_net_err() {
}

wait_for_salt_minion() {
  local minion="$1"
  local timeout="${2:-5}"
  local logfile="${3:-'/dev/stdout'}"
  retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
  local attempt=0
  # each attempt takes about 15 seconds
  local maxAttempts=20
  until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
    attempt=$((attempt+1))
    if [[ $attempt -eq $maxAttempts ]]; then
      return 1
    fi
    sleep 10
  done
  return 0
  local minion="$1"
  local max_wait="${2:-30}"
  local interval="${3:-2}"
  local logfile="${4:-'/dev/stdout'}"
  local elapsed=0

  echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting for salt-minion '$minion' to be ready..."

  while [ $elapsed -lt $max_wait ]; do
    # Check if service is running
    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if salt-minion service is running"
    if ! systemctl is-active --quiet salt-minion; then
      echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service not running (elapsed: ${elapsed}s)"
      sleep $interval
      elapsed=$((elapsed + interval))
      continue
    fi
    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion service is running"

    # Check if minion responds to ping
    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Check if $minion responds to ping"
    if salt "$minion" test.ping --timeout=3 --out=json 2>> "$logfile" | grep -q "true"; then
      echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - salt-minion '$minion' is connected and ready!"
      return 0
    fi

    echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - Waiting... (${elapsed}s / ${max_wait}s)"
    sleep $interval
    elapsed=$((elapsed + interval))
  done

  echo "$(date '+%a %d %b %Y %H:%M:%S.%6N') - ERROR: salt-minion '$minion' not ready after $max_wait seconds"
  return 1
}

salt_minion_count() {

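A usage sketch for the rewritten helper (minion name and log path are placeholders): the defaults now give a 30-second budget polled every 2 seconds, in place of the old fixed 20 attempts at 10-second sleeps.

# Sketch only:
wait_for_salt_minion "node01_sensor" || echo "minion never became ready" >&2
# Wait up to 5 minutes, poll every 10 seconds, append salt stderr to a log:
wait_for_salt_minion "node01_sensor" 300 10 /root/setup.log
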
@@ -25,7 +25,6 @@ container_list() {
  if [ $MANAGERCHECK == 'so-import' ]; then
    TRUSTED_CONTAINERS=(
      "so-elasticsearch"
      "so-idstools"
      "so-influxdb"
      "so-kibana"
      "so-kratos"
@@ -49,7 +48,6 @@ container_list() {
      "so-elastic-fleet-package-registry"
      "so-elasticsearch"
      "so-idh"
      "so-idstools"
      "so-influxdb"
      "so-kafka"
      "so-kibana"
@@ -62,8 +60,6 @@ container_list() {
      "so-soc"
      "so-steno"
      "so-strelka-backend"
      "so-strelka-filestream"
      "so-strelka-frontend"
      "so-strelka-manager"
      "so-suricata"
      "so-telegraf"
@@ -71,7 +67,6 @@ container_list() {
    )
  else
    TRUSTED_CONTAINERS=(
      "so-idstools"
      "so-elasticsearch"
      "so-logstash"
      "so-nginx"

@@ -129,6 +129,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|responded with status-code 503" # telegraf getting 503 from ES during startup
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|process_cluster_event_timeout_exception" # logstash waiting for elasticsearch to start
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart
fi

if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -159,7 +161,9 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding ingest pipeline" # false positive (elasticsearch ingest pipeline names contain 'error')
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating index template" # false positive (elasticsearch index or template names contain 'error')
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|updating component template" # false positive (elasticsearch index or template names contain 'error')
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading component template" # false positive (elasticsearch index or template names contain 'error')
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|upgrading composable template" # false positive (elasticsearch composable template names contain 'error')
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Error while parsing document for index \[.ds-logs-kratos-so-.*object mapping for \[file\]" # false positive (mapping error occurring BEFORE kratos index has rolled over in 2.4.210)
fi

if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -222,6 +226,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Initialized license manager" # SOC log: before fields.status was changed to fields.licenseStatus
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
fi

RESULT=0
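Each of these lines appends one more alternative to a single '|'-separated pattern; the later check then filters known-benign matches out of the candidate error lines. A reduced sketch of the mechanism, assuming (not confirmed by this diff) that check_for_errors pipes matches through grep -vE:

# Sketch only:
EXCLUDED_ERRORS="first benign message"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|second benign message"
grep -iE "error" /tmp/log_check | grep -vE "$EXCLUDED_ERRORS"
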
@@ -268,6 +273,13 @@ for log_file in $(cat /tmp/log_check_files); do
  tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check
  check_for_errors
done
# Look for OOM specific errors in /var/log/messages which can lead to odd behavior / test failures
if [[ -f /var/log/messages ]]; then
  status "Checking log file /var/log/messages"
  if journalctl --since "24 hours ago" | grep -iE 'out of memory|oom-kill'; then
    RESULT=1
  fi
fi

# Cleanup temp files
rm -f /tmp/log_check_files

@@ -85,7 +85,7 @@ function suricata() {
  docker run --rm \
    -v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
    -v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
    -v /opt/so/conf/suricata/rules:/etc/suricata/rules:ro \
    -v /opt/so/rules/suricata/:/etc/suricata/rules:ro \
    -v ${LOG_PATH}:/var/log/suricata/:rw \
    -v ${NSM_PATH}/:/nsm/:rw \
    -v "$PCAP:/input.pcap:ro" \
@@ -173,7 +173,7 @@ for PCAP in $INPUT_FILES; do
  status "- assigning unique identifier to import: $HASH"

  pcap_data=$(pcapinfo "${PCAP}")
  if ! echo "$pcap_data" | grep -q "First packet time:" || echo "$pcap_data" |egrep -q "Last packet time: 1970-01-01|Last packet time: n/a"; then
  if ! echo "$pcap_data" | grep -q "Earliest packet time:" || echo "$pcap_data" |egrep -q "Latest packet time: 1970-01-01|Latest packet time: n/a"; then
    status "- this PCAP file is invalid; skipping"
    INVALID_PCAPS_COUNT=$((INVALID_PCAPS_COUNT + 1))
  else
@@ -205,8 +205,8 @@ for PCAP in $INPUT_FILES; do
    HASHES="${HASHES} ${HASH}"
  fi

  START=$(pcapinfo "${PCAP}" -a |grep "First packet time:" | awk '{print $4}')
  END=$(pcapinfo "${PCAP}" -e |grep "Last packet time:" | awk '{print $4}')
  START=$(pcapinfo "${PCAP}" -a |grep "Earliest packet time:" | awk '{print $4}')
  END=$(pcapinfo "${PCAP}" -e |grep "Latest packet time:" | awk '{print $4}')
  status "- found PCAP data spanning dates $START through $END"

  # compare $START to $START_OLDEST

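This tracks the wording change in newer Wireshark capinfos output, where "First/Last packet time" became "Earliest/Latest packet time". Assuming pcapinfo wraps capinfos, the parsed line looks roughly like the comment below, with awk picking the fourth whitespace-separated field (the date); the file name and timestamp are hypothetical:

# Sketch only:
#   $ pcapinfo example.pcap -a
#   Earliest packet time: 2024-05-01 12:00:03.141592
pcapinfo example.pcap -a | grep "Earliest packet time:" | awk '{print $4}'   # -> 2024-05-01
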
@@ -3,29 +3,16 @@
{# we only want this state to run if it is CentOS #}
{% if GLOBALS.os == 'OEL' %}

{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
{% set manager = GLOBALS.manager %}
{% set x509dict = salt['mine.get'](manager | lower~'*', 'x509.get_pem_entries') %}
{% for host in x509dict %}
{% if host.split('_')|last in ['manager', 'managersearch', 'standalone', 'import', 'eval'] %}
{% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %}
{% do global_ca_server.append(host) %}
{% endif %}
{% endfor %}
{% set trusttheca_text = global_ca_text[0] %}
{% set ca_server = global_ca_server[0] %}

trusted_ca:
  x509.pem_managed:
  file.managed:
    - name: /etc/pki/ca-trust/source/anchors/ca.crt
    - text: {{ trusttheca_text }}
    - source: salt://ca/files/ca.crt

update_ca_certs:
  cmd.run:
    - name: update-ca-trust
    - onchanges:
      - x509: trusted_ca
      - file: trusted_ca

{% else %}


@@ -24,11 +24,6 @@ docker:
      custom_bind_mounts: []
      extra_hosts: []
      extra_env: []
    'so-idstools':
      final_octet: 25
      custom_bind_mounts: []
      extra_hosts: []
      extra_env: []
    'so-influxdb':
      final_octet: 26
      port_bindings:

@@ -6,9 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}

# include ssl since docker service requires the intca
# docker service requires the ca.crt
include:
  - ssl
  - ca

dockergroup:
  group.present:
@@ -89,10 +89,9 @@ docker_running:
    - enable: True
    - watch:
      - file: docker_daemon
      - x509: trusttheca
    - require:
      - file: docker_daemon
      - x509: trusttheca
      - file: trusttheca


# Reserve OS ports for Docker proxy in case boot settings are not already applied/present

@@ -41,7 +41,6 @@ docker:
        forcedType: "[]string"
  so-elastic-fleet: *dockerOptions
  so-elasticsearch: *dockerOptions
  so-idstools: *dockerOptions
  so-influxdb: *dockerOptions
  so-kibana: *dockerOptions
  so-kratos: *dockerOptions
@@ -102,4 +101,4 @@ docker:
        multiline: True
        forcedType: "[]string"
  so-zeek: *dockerOptions
  so-kafka: *dockerOptions
  so-kafka: *dockerOptions

@@ -60,7 +60,7 @@ so-elastalert:
    - watch:
      - file: elastaconf
    - onlyif:
      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 8" {# only run this state if elasticsearch is version 8 #}
      - "so-elasticsearch-query / | jq -r '.version.number[0:1]' | grep -q 9" {# only run this state if elasticsearch is version 9 #}

delete_so-elastalert_so-status.disabled:
  file.uncomment:

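The onlyif gate reads the major version straight off the Elasticsearch root endpoint; a sketch of what the guard evaluates (the printed value is illustrative):

# Sketch only:
so-elasticsearch-query / | jq -r '.version.number[0:1]'   # e.g. prints: 9
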
@@ -9,6 +9,7 @@
{% from 'docker/docker.map.jinja' import DOCKER %}

include:
  - ca
  - elasticagent.config
  - elasticagent.sostatus

@@ -55,8 +56,10 @@ so-elastic-agent:
{% endif %}
    - require:
      - file: create-elastic-agent-config
      - file: trusttheca
    - watch:
      - file: create-elastic-agent-config
      - file: trusttheca

delete_so-elastic-agent_so-status.disabled:
  file.uncomment:

@@ -9,3 +9,6 @@ fleetartifactdir:
    - user: 947
    - group: 939
    - makedirs: True
    - recurse:
      - user
      - group

salt/elasticfleet/config.map.jinja (new file, 34 lines)
@@ -0,0 +1,34 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}

{# advanced config_yaml options for elasticfleet logstash output #}
{% set ADV_OUTPUT_LOGSTASH_RAW = ELASTICFLEETMERGED.config.outputs.logstash %}
{% set ADV_OUTPUT_LOGSTASH = {} %}
{% for k, v in ADV_OUTPUT_LOGSTASH_RAW.items() %}
  {% if v != "" and v is not none %}
    {% if k == 'queue_mem_events' %}
      {# rename queue_mem_events to queue.mem.events #}
      {% do ADV_OUTPUT_LOGSTASH.update({'queue.mem.events':v}) %}
    {% elif k == 'loadbalance' %}
      {% if v %}
        {# only include loadbalance config when it's True #}
        {% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
      {% endif %}
    {% else %}
      {% do ADV_OUTPUT_LOGSTASH.update({k:v}) %}
    {% endif %}
  {% endif %}
{% endfor %}

{% set LOGSTASH_CONFIG_YAML_RAW = [] %}
{% if ADV_OUTPUT_LOGSTASH %}
  {% for k, v in ADV_OUTPUT_LOGSTASH.items() %}
    {% do LOGSTASH_CONFIG_YAML_RAW.append(k ~ ': ' ~ v) %}
  {% endfor %}
{% endif %}

{% set LOGSTASH_CONFIG_YAML = LOGSTASH_CONFIG_YAML_RAW | join('\\n') if LOGSTASH_CONFIG_YAML_RAW else '' %}
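A worked example of what this map produces, under assumed pillar values: empty strings are dropped, a false loadbalance is omitted entirely, and queue_mem_events is renamed on the way through.

# Sketch only: assume pillar sets bulk_max_size=4096, queue_mem_events=12800,
# loadbalance=False, and leaves everything else ''. The rendered
# LOGSTASH_CONFIG_YAML fragment would then be:
printf 'bulk_max_size: 4096\nqueue.mem.events: 12800\n'
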
@@ -9,6 +9,9 @@
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% set node_data = salt['pillar.get']('node_data') %}

include:
  - elasticfleet.artifact_registry

# Add EA Group
elasticfleetgroup:
  group.present:
@@ -92,6 +95,9 @@ soresourcesrepoclone:
    - rev: 'main'
    - depth: 1
    - force_reset: True
    - retry:
        attempts: 3
        interval: 10
{% endif %}

elasticdefendconfdir:

@@ -10,12 +10,19 @@ elasticfleet:
  grid_enrollment: ''
  defend_filters:
    enable_auto_configuration: False
  outputs:
    logstash:
      bulk_max_size: ''
      worker: ''
      queue_mem_events: ''
      timeout: ''
      loadbalance: False
      compression_level: ''
  subscription_integrations: False
  auto_upgrade_integrations: False
  logging:
    zeek:
      excluded:
        - analyzer
        - broker
        - capture_loss
        - cluster
@@ -38,6 +45,7 @@ elasticfleet:
        - elasticsearch
        - endpoint
        - fleet_server
        - filestream
        - http_endpoint
        - httpjson
        - log

@@ -13,9 +13,11 @@
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}

include:
  - ca
  - logstash.ssl
  - elasticfleet.ssl
  - elasticfleet.config
  - elasticfleet.sostatus
  - ssl

{% if grains.role not in ['so-fleet'] %}
# Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
@@ -32,6 +34,17 @@ so-elastic-fleet-auto-configure-logstash-outputs:
    - retry:
        attempts: 4
        interval: 30

{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
so-elastic-fleet-auto-configure-logstash-outputs-force:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
    - retry:
        attempts: 4
        interval: 30
    - onchanges:
      - x509: etc_elasticfleet_logstash_crt
      - x509: elasticfleet_kafka_crt
{% endif %}

# If enabled, automatically update Fleet Server URLs & ES Connection
@@ -67,6 +80,8 @@ so-elastic-fleet-auto-configure-artifact-urls:
elasticagent_syncartifacts:
  file.recurse:
    - name: /nsm/elastic-fleet/artifacts/beats
    - user: 947
    - group: 947
    - source: salt://beats
{% endif %}

@@ -120,6 +135,11 @@ so-elastic-fleet:
{% endfor %}
{% endif %}
    - watch:
      - file: trusttheca
      - x509: etc_elasticfleet_key
      - x509: etc_elasticfleet_crt
    - require:
      - file: trusttheca
      - x509: etc_elasticfleet_key
      - x509: etc_elasticfleet_crt
{% endif %}
@@ -133,12 +153,18 @@ so-elastic-fleet-package-statefile:
so-elastic-fleet-package-upgrade:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-package-upgrade
    - retry:
        attempts: 3
        interval: 10
    - onchanges:
      - file: /opt/so/state/elastic_fleet_packages.txt

so-elastic-fleet-integrations:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-integration-policy-load
    - retry:
        attempts: 3
        interval: 10

so-elastic-agent-grid-upgrade:
  cmd.run:
@@ -150,7 +176,11 @@ so-elastic-agent-grid-upgrade:
so-elastic-fleet-integration-upgrade:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-integration-upgrade
    - retry:
        attempts: 3
        interval: 10

{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
so-elastic-fleet-addon-integrations:
  cmd.run:
    - name: /usr/sbin/so-elastic-fleet-optional-integrations-load

@@ -2,7 +2,7 @@
{%- raw -%}
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "import-zeek-logs",
@@ -10,19 +10,31 @@
  "description": "Zeek Import logs",
  "policy_id": "so-grid-nodes_general",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/import/*/zeek/logs/*.log"
            ],
            "data_stream.dataset": "import",
            "tags": [],
            "pipeline": "",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"],
            "include_files": [],
            "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/zeek/logs/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"import.file\").slice(0,-4);\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n imported: true\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n import.file: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
            "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
            "tags": [],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }

@@ -11,36 +11,51 @@
{%- endif -%}
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "kratos-logs",
  "namespace": "so",
  "description": "Kratos logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/kratos/kratos.log"
            ],
            "data_stream.dataset": "kratos",
            "tags": ["so-kratos"],
            "pipeline": "kratos",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            {%- if valid_identities -%}
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos\n- if:\n has_fields:\n - identity_id\n then:{% for id, email in identities %}\n - if:\n equals:\n identity_id: \"{{ id }}\"\n then:\n - add_fields:\n target: ''\n fields:\n user.name: \"{{ email }}\"{% endfor %}",
            {%- else -%}
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: kratos",
            {%- endif -%}
            "custom": "pipeline: kratos"
            "tags": [
              "so-kratos"
            ],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}

}
@@ -2,28 +2,38 @@
{%- raw -%}
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "id": "zeek-logs",
  "name": "zeek-logs",
  "namespace": "so",
  "description": "Zeek logs",
  "policy_id": "so-grid-nodes_general",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/zeek/logs/current/*.log"
            ],
            "data_stream.dataset": "zeek",
            "tags": [],
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}).log$"],
            "include_files": [],
            "processors": "- dissect:\n tokenizer: \"/nsm/zeek/logs/current/%{pipeline}.log\"\n field: \"log.file.path\"\n trim_chars: \".log\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\");\n event.Put(\"@metadata.pipeline\", \"zeek.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: zeek\n- add_tags:\n tags: \"ics\"\n when:\n regexp:\n pipeline: \"^bacnet*|^bsap*|^cip*|^cotp*|^dnp3*|^ecat*|^enip*|^modbus*|^opcua*|^profinet*|^s7comm*\"",
            "custom": "exclude_files: [\"{%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%}.log$\"]\n"
            "tags": [],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
@@ -31,4 +41,4 @@
  },
  "force": true
}
{%- endraw -%}
{%- endraw -%}
@@ -5,7 +5,7 @@
  "package": {
    "name": "endpoint",
    "title": "Elastic Defend",
    "version": "8.18.1",
    "version": "9.0.2",
    "requires_root": true
  },
  "enabled": true,

@@ -0,0 +1,48 @@
{
  "package": {
    "name": "filestream",
    "version": ""
  },
  "name": "agent-monitor",
  "namespace": "",
  "description": "",
  "policy_ids": [
    "so-grid-nodes_general"
  ],
  "output_id": null,
  "vars": {},
  "inputs": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/agents/agent-monitor.log"
            ],
            "data_stream.dataset": "agentmonitor",
            "pipeline": "elasticagent.monitor",
            "parsers": "",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- add_fields:\n target: event\n fields:\n module: gridmetrics",
            "tags": [],
            "recursive_glob": true,
            "ignore_older": "72h",
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": true,
            "fingerprint_offset": 0,
            "fingerprint_length": 64,
            "file_identity_native": false,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  }
}

@@ -40,7 +40,7 @@
      "enabled": true,
      "vars": {
        "paths": [
          "/opt/so/log/elasticsearch/*.log"
          "/opt/so/log/elasticsearch/*.json"
        ]
      }
    },

@@ -1,26 +1,43 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "hydra-logs",
  "namespace": "so",
  "description": "Hydra logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/hydra/hydra.log"
            ],
            "data_stream.dataset": "hydra",
            "tags": ["so-hydra"],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
            "custom": "pipeline: hydra"
            "pipeline": "hydra",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- add_fields:\n target: event\n fields:\n category: iam\n module: hydra",
            "tags": [
              "so-hydra"
            ],
            "recursive_glob": true,
            "ignore_older": "72h",
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
@@ -28,3 +45,5 @@
  },
  "force": true
}



@@ -1,30 +1,44 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "idh-logs",
  "namespace": "so",
  "description": "IDH integration",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/idh/opencanary.log"
            ],
            "data_stream.dataset": "idh",
            "tags": [],
            "pipeline": "common",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n add_error_key: true\n- convert:\n fields:\n - {from: \"logtype\", to: \"event.code\", type: \"string\"}\n- drop_fields:\n when:\n equals:\n event.code: \"1001\"\n fields: [\"src_host\", \"src_port\", \"dst_host\", \"dst_port\" ]\n ignore_missing: true\n- rename:\n fields:\n - from: \"src_host\"\n to: \"source.ip\"\n - from: \"src_port\"\n to: \"source.port\"\n - from: \"dst_host\"\n to: \"destination.host\"\n - from: \"dst_port\"\n to: \"destination.port\"\n ignore_missing: true\n- drop_fields:\n fields: '[\"prospector\", \"input\", \"offset\", \"beat\"]'\n- add_fields:\n target: event\n fields:\n category: host\n module: opencanary",
            "custom": "pipeline: common"
            "tags": [],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -1,33 +1,46 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "import-evtx-logs",
  "namespace": "so",
  "description": "Import Windows EVTX logs",
  "policy_id": "so-grid-nodes_general",
  "vars": {},
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/import/*/evtx/*.json"
            ],
            "data_stream.dataset": "import",
            "custom": "",
            "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.3.3\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.3.3\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.3.3\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
            "tags": [
              "import"
            ]
            ],
            "recursive_glob": true,
            "ignore_older": "72h",
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -1,30 +1,45 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "import-suricata-logs",
  "namespace": "so",
  "description": "Import Suricata logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/import/*/suricata/eve*.json"
            ],
            "data_stream.dataset": "import",
            "pipeline": "suricata.common",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n",
            "tags": [],
            "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata\n imported: true\n- dissect:\n tokenizer: \"/nsm/import/%{import.id}/suricata/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"",
            "custom": "pipeline: suricata.common"
            "recursive_glob": true,
            "ignore_older": "72h",
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -1,18 +1,17 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "rita-logs",
  "namespace": "so",
  "description": "RITA Logs",
  "policy_id": "so-grid-nodes_general",
  "vars": {},
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
@@ -20,15 +19,28 @@
              "/nsm/rita/exploded-dns.csv",
              "/nsm/rita/long-connections.csv"
            ],
            "exclude_files": [],
            "ignore_older": "72h",
            "data_stream.dataset": "rita",
            "tags": [],
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- dissect:\n tokenizer: \"/nsm/rita/%{pipeline}.csv\"\n field: \"log.file.path\"\n trim_chars: \".csv\"\n target_prefix: \"\"\n- script:\n lang: javascript\n source: >\n function process(event) {\n var pl = event.Get(\"pipeline\").split(\"-\");\n if (pl.length > 1) {\n pl = pl[1];\n }\n else {\n pl = pl[0];\n }\n event.Put(\"@metadata.pipeline\", \"rita.\" + pl);\n }\n- add_fields:\n target: event\n fields:\n category: network\n module: rita",
            "custom": "exclude_lines: ['^Score', '^Source', '^Domain', '^No results']"
            "tags": [],
            "recursive_glob": true,
            "ignore_older": "72h",
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}

@@ -1,29 +1,41 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "so-ip-mappings",
  "namespace": "so",
  "description": "IP Description mappings",
  "policy_id": "so-grid-nodes_general",
  "vars": {},
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/custom-mappings/ip-descriptions.csv"
            ],
            "data_stream.dataset": "hostnamemappings",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
            "tags": [
              "so-ip-mappings"
            ],
            "processors": "- decode_csv_fields:\n fields:\n message: decoded.csv\n separator: \",\"\n ignore_missing: false\n overwrite_keys: true\n trim_leading_space: true\n fail_on_error: true\n\n- extract_array:\n field: decoded.csv\n mappings:\n so.ip_address: '0'\n so.description: '1'\n\n- script:\n lang: javascript\n source: >\n function process(event) {\n var ip = event.Get('so.ip_address');\n var validIpRegex = /^((25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)\\.){3}(25[0-5]|2[0-4]\\d|1\\d{2}|[1-9]?\\d)$/\n if (!validIpRegex.test(ip)) {\n event.Cancel();\n }\n }\n- fingerprint:\n fields: [\"so.ip_address\"]\n target_field: \"@metadata._id\"\n",
            "custom": ""
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
@@ -31,5 +43,3 @@
  },
  "force": true
}



@@ -1,30 +1,44 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "soc-auth-sync-logs",
  "namespace": "so",
  "description": "Security Onion - Elastic Auth Sync - Logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/soc/sync.log"
            ],
            "data_stream.dataset": "soc",
            "tags": ["so-soc"],
            "pipeline": "common",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- dissect:\n tokenizer: \"%{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: auth_sync",
            "custom": "pipeline: common"
            "tags": [],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -1,35 +1,48 @@
{
  "policy_id": "so-grid-nodes_general",
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "soc-detections-logs",
  "description": "Security Onion Console - Detections Logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/soc/detections_runtime-status_sigma.log",
              "/opt/so/log/soc/detections_runtime-status_yara.log"
            ],
            "exclude_files": [],
            "ignore_older": "72h",
            "data_stream.dataset": "soc",
            "pipeline": "common",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
            "tags": [
              "so-soc"
            ],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: detections\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
            "custom": "pipeline: common"
            "recursive_glob": true,
            "ignore_older": "72h",
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -1,30 +1,46 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "soc-salt-relay-logs",
  "namespace": "so",
  "description": "Security Onion - Salt Relay - Logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/soc/salt-relay.log"
            ],
            "data_stream.dataset": "soc",
            "tags": ["so-soc"],
            "pipeline": "common",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- dissect:\n tokenizer: \"%{soc.ts} | %{event.action}\"\n field: \"message\"\n target_prefix: \"\"\n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: salt_relay",
            "custom": "pipeline: common"
            "tags": [
              "so-soc"
            ],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -1,30 +1,44 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "soc-sensoroni-logs",
  "namespace": "so",
  "description": "Security Onion - Sensoroni - Logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/sensoroni/sensoroni.log"
            ],
            "data_stream.dataset": "soc",
            "tags": [],
            "pipeline": "common",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"sensoroni\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: sensoroni\n- rename:\n fields:\n - from: \"sensoroni.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"sensoroni.fields.status\"\n to: \"http.response.status_code\"\n - from: \"sensoroni.fields.method\"\n to: \"http.request.method\"\n - from: \"sensoroni.fields.path\"\n to: \"url.path\"\n - from: \"sensoroni.message\"\n to: \"event.action\"\n - from: \"sensoroni.level\"\n to: \"log.level\"\n ignore_missing: true",
            "custom": "pipeline: common"
            "tags": [],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
  "force": true
}
@@ -1,30 +1,46 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "soc-server-logs",
  "namespace": "so",
  "description": "Security Onion Console Logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/opt/so/log/soc/sensoroni-server.log"
            ],
            "data_stream.dataset": "soc",
            "tags": ["so-soc"],
            "pipeline": "common",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- decode_json_fields:\n fields: [\"message\"]\n target: \"soc\"\n process_array: true\n max_depth: 2\n add_error_key: true \n- add_fields:\n target: event\n fields:\n category: host\n module: soc\n dataset_temp: server\n- rename:\n fields:\n - from: \"soc.fields.sourceIp\"\n to: \"source.ip\"\n - from: \"soc.fields.status\"\n to: \"http.response.status_code\"\n - from: \"soc.fields.method\"\n to: \"http.request.method\"\n - from: \"soc.fields.path\"\n to: \"url.path\"\n - from: \"soc.message\"\n to: \"event.action\"\n - from: \"soc.level\"\n to: \"log.level\"\n ignore_missing: true",
            "custom": "pipeline: common"
            "tags": [
              "so-soc"
            ],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -1,30 +1,44 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "strelka-logs",
  "namespace": "so",
  "description": "Strelka logs",
  "description": "Strelka Logs",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/strelka/log/strelka.log"
            ],
            "data_stream.dataset": "strelka",
            "tags": [],
            "pipeline": "strelka.file",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- add_fields:\n target: event\n fields:\n category: file\n module: strelka",
            "custom": "pipeline: strelka.file"
            "tags": [],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -1,30 +1,44 @@
{
  "package": {
    "name": "log",
    "name": "filestream",
    "version": ""
  },
  "name": "suricata-logs",
  "namespace": "so",
  "description": "Suricata integration",
  "policy_id": "so-grid-nodes_general",
  "namespace": "so",
  "inputs": {
    "logs-logfile": {
    "filestream-filestream": {
      "enabled": true,
      "streams": {
        "log.logs": {
        "filestream.generic": {
          "enabled": true,
          "vars": {
            "paths": [
              "/nsm/suricata/eve*.json"
            ],
            "data_stream.dataset": "suricata",
            "tags": [],
            "data_stream.dataset": "filestream.generic",
            "pipeline": "suricata.common",
            "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
            "exclude_files": [
              "\\.gz$"
            ],
            "include_files": [],
            "processors": "- add_fields:\n target: event\n fields:\n category: network\n module: suricata",
            "custom": "pipeline: suricata.common"
            "tags": [],
            "recursive_glob": true,
            "clean_inactive": -1,
            "harvester_limit": 0,
            "fingerprint": false,
            "fingerprint_offset": 0,
            "fingerprint_length": "64",
            "file_identity_native": true,
            "exclude_lines": [],
            "include_lines": []
          }
        }
      }
    }
  },
  "force": true
}
}
@@ -2,26 +2,32 @@
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

{%- set GRIDNODETOKENGENERAL = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
{%- set GRIDNODETOKENHEAVY = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_general') -%}
{% if grains.role == 'so-heavynode' %}
{% set GRIDNODETOKEN = salt['pillar.get']('global:fleet_grid_enrollment_token_heavy') -%}
{% endif %}

{% set AGENT_STATUS = salt['service.available']('elastic-agent') %}
{% if not AGENT_STATUS %}
{% set AGENT_EXISTS = salt['file.file_exists']('/opt/Elastic/Agent/elastic-agent') %}

{% if grains.role not in ['so-heavynode'] %}
run_installer:
  cmd.script:
    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - cwd: /opt/so
    - args: -token={{ GRIDNODETOKENGENERAL }}
    - retry: True
{% else %}
run_installer:
  cmd.script:
    - name: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - cwd: /opt/so
    - args: -token={{ GRIDNODETOKENHEAVY }}
    - retry: True
{% endif %}
{% if not AGENT_STATUS or not AGENT_EXISTS %}

pull_agent_installer:
  file.managed:
    - name: /opt/so/so-elastic-agent_linux_amd64
    - source: salt://elasticfleet/files/so_agent-installers/so-elastic-agent_linux_amd64
    - mode: 755
    - makedirs: True

run_installer:
  cmd.run:
    - name: ./so-elastic-agent_linux_amd64 -token={{ GRIDNODETOKEN }} -force
    - cwd: /opt/so
    - retry:
        attempts: 3
        interval: 20

cleanup_agent_installer:
  file.absent:
    - name: /opt/so/so-elastic-agent_linux_amd64
{% endif %}

@@ -21,6 +21,7 @@
  'azure_application_insights.app_state': 'azure.app_state',
  'azure_billing.billing': 'azure.billing',
  'azure_functions.metrics': 'azure.function',
  'azure_ai_foundry.metrics': 'azure.ai_foundry',
  'azure_metrics.compute_vm_scaleset': 'azure.compute_vm_scaleset',
  'azure_metrics.compute_vm': 'azure.compute_vm',
  'azure_metrics.container_instance': 'azure.container_instance',
@@ -121,6 +122,9 @@
      "phases": {
        "cold": {
          "actions": {
            "allocate":{
              "number_of_replicas": ""
            },
            "set_priority": {"priority": 0}
          },
          "min_age": "60d"
@@ -137,12 +141,31 @@
            "max_age": "30d",
            "max_primary_shard_size": "50gb"
          },
          "forcemerge":{
            "max_num_segments": ""
          },
          "shrink":{
            "max_primary_shard_size": "",
            "method": "COUNT",
            "number_of_shards": ""
          },
          "set_priority": {"priority": 100}
        },
        "min_age": "0ms"
      },
      "warm": {
        "actions": {
          "allocate": {
            "number_of_replicas": ""
          },
          "forcemerge": {
            "max_num_segments": ""
          },
          "shrink":{
            "max_primary_shard_size": "",
            "method": "COUNT",
            "number_of_shards": ""
          },
          "set_priority": {"priority": 50}
        },
        "min_age": "30d"

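One way to sanity-check the rendered policy once Elasticsearch has stored it, assuming the grid's so-elasticsearch-query wrapper and a hypothetical policy name:

# Sketch only: list the phases ES actually holds for the policy.
so-elasticsearch-query _ilm/policy/so-logs | jq '.["so-logs"].policy.phases | keys'
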
@@ -50,6 +50,46 @@ elasticfleet:
      global: True
      forcedType: bool
      helpLink: elastic-fleet.html
  outputs:
    logstash:
      bulk_max_size:
        description: The maximum number of events to bulk in a single Logstash request.
        global: True
        forcedType: int
        advanced: True
        helpLink: elastic-fleet.html
      worker:
        description: The number of workers per configured host publishing events.
        global: True
        forcedType: int
        advanced: True
        helpLink: elastic-fleet.html
      queue_mem_events:
        title: queued events
        description: The number of events the queue can store. This value should be evenly divisible by 'bulk_max_size' to avoid sending partial batches to the output.
        global: True
        forcedType: int
        advanced: True
        helpLink: elastic-fleet.html
      timeout:
        description: The number of seconds to wait for responses from the Logstash server before timing out, e.g. 30s.
        regex: ^[0-9]+s$
        advanced: True
        global: True
        helpLink: elastic-fleet.html
      loadbalance:
        description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive.
        forcedType: bool
        advanced: True
        global: True
        helpLink: elastic-fleet.html
      compression_level:
        description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression).
        regex: ^[1-9]$
        forcedType: int
        advanced: True
        global: True
        helpLink: elastic-fleet.html
  server:
    custom_fqdn:
      description: Custom FQDN for Agents to connect to. One per line.
||||
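These annotations only describe the tunables. A hedged sketch of how such values could be folded into the Fleet output update via config_yaml, mirroring the jq pattern the outputs-update script uses later in this diff; the key names are assumed from the Beats Logstash output and the values are illustrative:

    # Illustrative only: render advanced Logstash output tunables into config_yaml
    CONFIG_YAML=$(cat <<'EOF'
    bulk_max_size: 2048
    worker: 2
    queue.mem.events: 4096
    timeout: 30s
    loadbalance: true
    compression_level: 3
    EOF
    )
    # 4096 is evenly divisible by 2048, so batches always fill completely
    JSON_STRING=$(jq -n --arg CONFIG_YAML "$CONFIG_YAML" \
      '{"name":"grid-logstash","type":"logstash","config_yaml":$CONFIG_YAML}')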
salt/elasticfleet/ssl.sls (new file, 186 lines)
@@ -0,0 +1,186 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{% from 'ca/map.jinja' import CA %}

{% if GLOBALS.is_manager or GLOBALS.role in ['so-heavynode', 'so-fleet', 'so-receiver'] %}

{% if grains['role'] not in ['so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert
etc_elasticfleet_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-server.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-server.key') -%}
    - prereq:
      - x509: etc_elasticfleet_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

etc_elasticfleet_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-server.crt
    - ca_server: {{ CA.server }}
    - signing_policy: elasticfleet
    - private_key: /etc/pki/elasticfleet-server.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }},DNS:{{ GLOBALS.url_base }},IP:{{ GLOBALS.node_ip }}{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %},DNS:{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(',DNS:') }}{% endif %}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

efperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-server.key
    - mode: 640
    - group: 939

chownelasticfleetcrt:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-server.crt
    - mode: 640
    - user: 947
    - group: 939

chownelasticfleetkey:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-server.key
    - mode: 640
    - user: 947
    - group: 939
# End -- Elastic Fleet Host Cert
{% endif %} # endif is for not including HeavyNodes & Receivers


# Start -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
etc_elasticfleet_agent_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-agent.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-agent.key') -%}
    - prereq:
      - x509: etc_elasticfleet_agent_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

etc_elasticfleet_agent_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-agent.crt
    - ca_server: {{ CA.server }}
    - signing_policy: elasticfleet
    - private_key: /etc/pki/elasticfleet-agent.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 -out /etc/pki/elasticfleet-agent.p8 -nocrypt"
    - onchanges:
      - x509: etc_elasticfleet_agent_key

efagentperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-agent.key
    - mode: 640
    - group: 939

chownelasticfleetagentcrt:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-agent.crt
    - mode: 640
    - user: 947
    - group: 939

chownelasticfleetagentkey:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-agent.key
    - mode: 640
    - user: 947
    - group: 939
# End -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)

{% endif %}

{% if GLOBALS.role in ['so-manager', 'so-managerhype', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticfleet-kafka.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
    - prereq:
      - x509: elasticfleet_kafka_crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

elasticfleet_kafka_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticfleet-kafka.crt
    - ca_server: {{ CA.server }}
    - signing_policy: kafka
    - private_key: /etc/pki/elasticfleet-kafka.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

elasticfleet_kafka_cert_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-kafka.crt
    - mode: 640
    - user: 947
    - group: 939

elasticfleet_kafka_key_perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticfleet-kafka.key
    - mode: 640
    - user: 947
    - group: 939
{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
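Once the state runs, the issued host certificate can be spot-checked on disk. A minimal sketch, assuming OpenSSL 1.1.1+ for the -ext flag; the paths come from the state above:

    # Verify subject and SANs on the issued Fleet host cert
    openssl x509 -in /etc/pki/elasticfleet-server.crt -noout -subject -ext subjectAltName
    # Confirm the private key is intact
    openssl rsa -in /etc/pki/elasticfleet-server.key -check -noout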
@@ -23,6 +23,13 @@ fi
# Define a banner to separate sections
banner="========================================================================="

fleet_api() {
  local QUERYPATH=$1
  shift

  curl -sK /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/${QUERYPATH}" "$@" --retry 3 --retry-delay 10 --fail 2>/dev/null
}
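The helper takes the Fleet endpoint path first and forwards any remaining arguments to curl, so callers read naturally. A quick usage sketch (both endpoints appear elsewhere in this diff):

    # GET, piping the JSON response to jq
    fleet_api "agent_policies" | jq -r '.items[].id'
    # POST with extra headers and a body
    fleet_api "package_policies" -XPOST \
      -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
      -d '{"name":"example"}'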
elastic_fleet_integration_check() {

  AGENT_POLICY=$1
@@ -39,7 +46,9 @@ elastic_fleet_integration_create() {

  JSON_STRING=$1

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -d "$JSON_STRING"; then
    return 1
  fi
}

@@ -56,7 +65,10 @@ elastic_fleet_integration_remove() {
    '{"packagePolicyIds":[$INTEGRATIONID]}'
  )

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "package_policies/delete" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    echo "Error: Unable to delete '$NAME' from '$AGENT_POLICY'"
    return 1
  fi
}

elastic_fleet_integration_update() {
@@ -65,7 +77,9 @@ elastic_fleet_integration_update() {

  JSON_STRING=$2

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPUT -d "$JSON_STRING"; then
    return 1
  fi
}

elastic_fleet_integration_policy_upgrade() {
@@ -77,78 +91,83 @@ elastic_fleet_integration_policy_upgrade() {
    '{"packagePolicyIds":[$INTEGRATIONID]}'
  )

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "package_policies/upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    return 1
  fi
}


elastic_fleet_package_version_check() {
  PACKAGE=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'

  if output=$(fleet_api "epm/packages/$PACKAGE"); then
    echo "$output" | jq -r '.item.version'
  else
    echo "Error: Failed to get current package version for '$PACKAGE'"
    return 1
  fi
}

elastic_fleet_package_latest_version_check() {
  PACKAGE=$1
  if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" --fail); then
  if output=$(fleet_api "epm/packages/$PACKAGE"); then
    if version=$(jq -e -r '.item.latestVersion' <<< $output); then
      echo "$version"
    fi
  else
    echo "Error: Failed to get latest version for $PACKAGE"
    echo "Error: Failed to get latest version for '$PACKAGE'"
    return 1
  fi
}

elastic_fleet_package_install() {
  PKG=$1
  VERSION=$2
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}' "localhost:5601/api/fleet/epm/packages/$PKG/$VERSION"
  if ! fleet_api "epm/packages/$PKG/$VERSION" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"force":true}'; then
    return 1
  fi
}

elastic_fleet_bulk_package_install() {
  BULK_PKG_LIST=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X POST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$1 "localhost:5601/api/fleet/epm/packages/_bulk"
}

elastic_fleet_package_is_installed() {
  PACKAGE=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.status'
}

elastic_fleet_installed_packages() {
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET -H 'kbn-xsrf: true' -H 'Content-Type: application/json' "localhost:5601/api/fleet/epm/packages/installed?perPage=500"
}

elastic_fleet_agent_policy_ids() {
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].id
  if [ $? -ne 0 ]; then
    echo "Error: Failed to retrieve agent policies."
    exit 1
  if ! fleet_api "epm/packages/_bulk" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d@$BULK_PKG_LIST; then
    return 1
  fi
}

elastic_fleet_agent_policy_names() {
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies" | jq -r .items[].name
  if [ $? -ne 0 ]; then
elastic_fleet_installed_packages() {
  if ! fleet_api "epm/packages/installed?perPage=500"; then
    return 1
  fi
}

elastic_fleet_agent_policy_ids() {
  if output=$(fleet_api "agent_policies"); then
    echo "$output" | jq -r .items[].id
  else
    echo "Error: Failed to retrieve agent policies."
    exit 1
    return 1
  fi
}

elastic_fleet_integration_policy_names() {
  AGENT_POLICY=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r .item.package_policies[].name
  if [ $? -ne 0 ]; then
  if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
    echo "$output" | jq -r .item.package_policies[].name
  else
    echo "Error: Failed to retrieve integrations for '$AGENT_POLICY'."
    exit 1
    return 1
  fi
}

elastic_fleet_integration_policy_package_name() {
  AGENT_POLICY=$1
  INTEGRATION=$2
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
  if [ $? -ne 0 ]; then
  if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
    echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.name'
  else
    echo "Error: Failed to retrieve package name for '$INTEGRATION' in '$AGENT_POLICY'."
    exit 1
    return 1
  fi
}
@@ -156,32 +175,32 @@ elastic_fleet_integration_policy_package_version() {
  AGENT_POLICY=$1
  INTEGRATION=$2

  if output=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" --fail); then
    if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< $output); then
  if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
    if version=$(jq -e -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .package.version' <<< "$output"); then
      echo "$version"
    fi
  else
    echo "Error: Failed to retrieve agent policy $AGENT_POLICY"
    exit 1
    echo "Error: Failed to retrieve integration version for '$INTEGRATION' in policy '$AGENT_POLICY'"
    return 1
  fi
}

elastic_fleet_integration_id() {
  AGENT_POLICY=$1
  INTEGRATION=$2
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/agent_policies/$AGENT_POLICY" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
  if [ $? -ne 0 ]; then
  if output=$(fleet_api "agent_policies/$AGENT_POLICY"); then
    echo "$output" | jq -r --arg INTEGRATION "$INTEGRATION" '.item.package_policies[] | select(.name==$INTEGRATION)| .id'
  else
    echo "Error: Failed to retrieve integration ID for '$INTEGRATION' in '$AGENT_POLICY'."
    exit 1
    return 1
  fi
}

elastic_fleet_integration_policy_dryrun_upgrade() {
  INTEGRATION_ID=$1
  curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -L -X POST "localhost:5601/api/fleet/package_policies/upgrade/dryrun" -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"
  if [ $? -ne 0 ]; then
  if ! fleet_api "package_policies/upgrade/dryrun" -H "Content-Type: application/json" -H 'kbn-xsrf: true' -XPOST -d "{\"packagePolicyIds\":[\"$INTEGRATION_ID\"]}"; then
    echo "Error: Failed to complete dry run for '$INTEGRATION_ID'."
    exit 1
    return 1
  fi
}
@@ -190,25 +209,18 @@ elastic_fleet_policy_create() {
  NAME=$1
  DESC=$2
  FLEETSERVER=$3
    TIMEOUT=$4
  TIMEOUT=$4

  JSON_STRING=$( jq -n \
                  --arg NAME "$NAME" \
                  --arg DESC "$DESC" \
                  --arg TIMEOUT $TIMEOUT \
                  --arg FLEETSERVER "$FLEETSERVER" \
                  '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
  )
    --arg NAME "$NAME" \
    --arg DESC "$DESC" \
    --arg TIMEOUT $TIMEOUT \
    --arg FLEETSERVER "$FLEETSERVER" \
    '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
  )
  # Create Fleet Policy
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
  if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
    return 1
  fi

}

elastic_fleet_policy_update() {

  POLICYID=$1
  JSON_STRING=$2

  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
@@ -8,6 +8,7 @@

. /usr/sbin/so-elastic-fleet-common

ERROR=false
# Manage Elastic Defend Integration for Initial Endpoints Policy
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do
@@ -15,9 +16,20 @@ do
  elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
  if [ -n "$INTEGRATION_ID" ]; then
    printf "\n\nIntegration $NAME exists - Upgrading integration policy\n"
    elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
    if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
      echo -e "\nFailed to upgrade integration policy for ${INTEGRATION##*/}"
      ERROR=true
      continue
    fi
  else
    printf "\n\nIntegration does not exist - Creating integration\n"
    elastic_fleet_integration_create "@$INTEGRATION"
    if ! elastic_fleet_integration_create "@$INTEGRATION"; then
      echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
      ERROR=true
      continue
    fi
  fi
done
if [[ "$ERROR" == "true" ]]; then
  exit 1
fi
@@ -25,5 +25,9 @@ for POLICYNAME in $POLICY; do
    .name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)

  # Now update the integration policy using the modified JSON
  elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"
  if ! elastic_fleet_integration_update "$INTEGRATION_ID" "$UPDATED_INTEGRATION_POLICY"; then
    # exit 1 on failure to update fleet integration policies, let salt handle retries
    echo "Failed to update $POLICYNAME.."
    exit 1
  fi
done
@@ -13,11 +13,10 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
  /usr/sbin/so-elastic-fleet-package-upgrade

  # Second, update Fleet Server policies
  /sbin/so-elastic-fleet-integration-policy-elastic-fleet-server
  /usr/sbin/so-elastic-fleet-integration-policy-elastic-fleet-server

  # Third, configure Elastic Defend Integration separately
  /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend

  # Initial Endpoints
  for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
  do
@@ -25,10 +24,18 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
    elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
    if [ -n "$INTEGRATION_ID" ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      elastic_fleet_integration_create "@$INTEGRATION"
      if ! elastic_fleet_integration_create "@$INTEGRATION"; then
        echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    fi
  done

@@ -39,10 +46,18 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
    elastic_fleet_integration_check "so-grid-nodes_general" "$INTEGRATION"
    if [ -n "$INTEGRATION_ID" ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      elastic_fleet_integration_create "@$INTEGRATION"
      if ! elastic_fleet_integration_create "@$INTEGRATION"; then
        echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    fi
  done
  if [[ "$RETURN_CODE" != "1" ]]; then
@@ -56,11 +71,19 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
    elastic_fleet_integration_check "so-grid-nodes_heavy" "$INTEGRATION"
    if [ -n "$INTEGRATION_ID" ]; then
      printf "\n\nIntegration $NAME exists - Updating integration\n"
      elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
      if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
        echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
        RETURN_CODE=1
        continue
      fi
    else
      printf "\n\nIntegration does not exist - Creating integration\n"
      if [ "$NAME" != "elasticsearch-logs" ]; then
        elastic_fleet_integration_create "@$INTEGRATION"
        if ! elastic_fleet_integration_create "@$INTEGRATION"; then
          echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
          RETURN_CODE=1
          continue
        fi
      fi
    fi
  done
@@ -77,11 +100,19 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
      elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
      if [ -n "$INTEGRATION_ID" ]; then
        printf "\n\nIntegration $NAME exists - Updating integration\n"
        elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
        if ! elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"; then
          echo -e "\nFailed to update integration for ${INTEGRATION##*/}"
          RETURN_CODE=1
          continue
        fi
      else
        printf "\n\nIntegration does not exist - Creating integration\n"
        if [ "$NAME" != "elasticsearch-logs" ]; then
          elastic_fleet_integration_create "@$INTEGRATION"
          if ! elastic_fleet_integration_create "@$INTEGRATION"; then
            echo -e "\nFailed to create integration for ${INTEGRATION##*/}"
            RETURN_CODE=1
            continue
          fi
        fi
      fi
    fi
@@ -14,7 +14,7 @@ if ! is_manager_node; then
fi

# Get current list of Grid Node Agents that need to be upgraded
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true")
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%20:%20%22{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%22%20and%20policy_id%20:%20%22so-grid-nodes_general%22&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null)

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
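With --fail and the stderr redirect, a bad response leaves RAW_JSON empty and the checksum extraction yields nothing or null. The guard that consumes CHECKSUM is elided from this hunk; it presumably takes roughly this shape (a sketch, not the repo's actual lines):

    # Hypothetical shape of the guard that consumes CHECKSUM
    if [[ -z "$CHECKSUM" || "$CHECKSUM" == "null" ]]; then
      echo "Failed to query the Fleet API for agent status"
      exit 1
    fi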
@@ -26,7 +26,7 @@ function update_es_urls() {
}

# Get current list of Fleet Elasticsearch URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch')
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_elasticsearch' --retry 3 --retry-delay 30 --fail 2>/dev/null)

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
@@ -24,12 +24,18 @@ fi

default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.last %} {% endif %}{% endfor %})

ERROR=false
for AGENT_POLICY in $agent_policies; do
  integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")
  if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
    # this script upgrades default integration packages, exit 1 and let salt handle retrying
    exit 1
  fi
  for INTEGRATION in $integrations; do
    if ! [[ "$INTEGRATION" == "elastic-defend-endpoints" ]] && ! [[ "$INTEGRATION" == "fleet_server-"* ]]; then
      # Get package name so we know what package to look for when checking the current and latest available version
      PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
      if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
        exit 1
      fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
      if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
{%- endif %}
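The space-padded regex test above is the usual bash idiom for whole-word array membership; the padding keeps partial package names from matching. A self-contained sketch with placeholder values:

    # Bash array membership via space-padded regex match
    default_packages=("apache" "nginx" "system")
    PACKAGE_NAME="nginx"
    if [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
      echo "$PACKAGE_NAME is a default package"
    fi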
@@ -48,7 +54,9 @@ for AGENT_POLICY in $agent_policies; do
      fi

      # Get integration ID
      INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION")
      if ! INTEGRATION_ID=$(elastic_fleet_integration_id "$AGENT_POLICY" "$INTEGRATION"); then
        exit 1
      fi

      if [[ "$PACKAGE_VERSION" != "$AVAILABLE_VERSION" ]]; then
        # Dry run of the upgrade
@@ -56,20 +64,23 @@ for AGENT_POLICY in $agent_policies; do
        echo "Current $PACKAGE_NAME package version ($PACKAGE_VERSION) is not the same as the latest available package ($AVAILABLE_VERSION)..."
        echo "Upgrading $INTEGRATION..."
        echo "Starting dry run..."
        DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID")
        if ! DRYRUN_OUTPUT=$(elastic_fleet_integration_policy_dryrun_upgrade "$INTEGRATION_ID"); then
          exit 1
        fi
        DRYRUN_ERRORS=$(echo "$DRYRUN_OUTPUT" | jq .[].hasErrors)

        # If no errors with dry run, proceed with actual upgrade
        if [[ "$DRYRUN_ERRORS" == "false" ]]; then
          echo "No errors detected. Proceeding with upgrade..."
          elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
          if [ $? -ne 0 ]; then
          if ! elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"; then
            echo "Error: Upgrade failed for $PACKAGE_NAME with integration ID '$INTEGRATION_ID'."
            exit 1
            ERROR=true
            continue
          fi
        else
          echo "Errors detected during dry run for $PACKAGE_NAME policy upgrade..."
          exit 1
          ERROR=true
          continue
        fi
      fi
{%- if not AUTO_UPGRADE_INTEGRATIONS %}
@@ -78,4 +89,7 @@ for AGENT_POLICY in $agent_policies; do
    fi
  done
done
if [[ "$ERROR" == "true" ]]; then
  exit 1
fi
echo
@@ -62,9 +62,17 @@ default_packages=({% for pkg in SUPPORTED_PACKAGES %}"{{ pkg }}"{% if not loop.l
in_use_integrations=()

for AGENT_POLICY in $agent_policies; do
  integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY")

  if ! integrations=$(elastic_fleet_integration_policy_names "$AGENT_POLICY"); then
    # skip the agent policy if we can't get required info and let salt retry; integrations loaded by this script are non-default integrations
    echo "Skipping $AGENT_POLICY.. "
    continue
  fi
  for INTEGRATION in $integrations; do
    PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION")
    if ! PACKAGE_NAME=$(elastic_fleet_integration_policy_package_name "$AGENT_POLICY" "$INTEGRATION"); then
      echo "Not adding $INTEGRATION, couldn't get package name"
      continue
    fi
    # non-default integrations that are in-use in any policy
    if ! [[ " ${default_packages[@]} " =~ " $PACKAGE_NAME " ]]; then
      in_use_integrations+=("$PACKAGE_NAME")
@@ -78,7 +86,7 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
    latest_package_list=$(/usr/sbin/so-elastic-fleet-package-list)
    echo '{ "packages" : []}' > $BULK_INSTALL_PACKAGE_LIST
    rm -f $INSTALLED_PACKAGE_LIST
    echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .savedObject.attributes.install_version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST
    echo $latest_package_list | jq '{packages: [.items[] | {name: .name, latest_version: .version, installed_version: .installationInfo.version, subscription: .conditions.elastic.subscription }]}' >> $INSTALLED_PACKAGE_LIST

    while read -r package; do
      # get package details
@@ -160,7 +168,11 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then

    for file in "${pkg_filename}_"*.json; do
      [ -e "$file" ] || continue
      elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT
      if ! elastic_fleet_bulk_package_install $file >> $BULK_INSTALL_OUTPUT; then
        # integrations loaded by this script are non-essential and shouldn't cause an exit; skip them for now and let the next highstate run retry
        echo "Failed to complete a chunk of bulk package installs -- $file "
        continue
      fi
    done
    # cleanup any temp files for chunked package install
    rm -f ${pkg_filename}_*.json $BULK_INSTALL_PACKAGE_LIST
@@ -168,8 +180,9 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
    echo "Elastic integrations don't appear to need installation/updating..."
  fi
  # Write out file for generating index/component/ilm templates
  latest_installed_package_list=$(elastic_fleet_installed_packages)
  echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
  if latest_installed_package_list=$(elastic_fleet_installed_packages); then
    echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
  fi
  if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
    # Refresh installed component template list
    latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
@@ -3,11 +3,36 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{%- from 'vars/globals.map.jinja' import GLOBALS %}
{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
{%- from 'elasticfleet/config.map.jinja' import LOGSTASH_CONFIG_YAML %}

. /usr/sbin/so-common

FORCE_UPDATE=false
UPDATE_CERTS=false
LOGSTASH_PILLAR_CONFIG_YAML="{{ LOGSTASH_CONFIG_YAML }}"
LOGSTASH_PILLAR_STATE_FILE="/opt/so/state/esfleet_logstash_config_pillar"

while [[ $# -gt 0 ]]; do
  case $1 in
    -f|--force)
      FORCE_UPDATE=true
      shift
      ;;
    -c|--certs)
      UPDATE_CERTS=true
      FORCE_UPDATE=true
      shift
      ;;
    *)
      echo "Unknown option $1"
      echo "Usage: $0 [-f|--force] [-c|--certs]"
      exit 1
      ;;
  esac
done

# Only run on Managers
if ! is_manager_node; then
  printf "Not a Manager Node... Exiting"
@@ -15,27 +40,109 @@ if ! is_manager_node; then
fi

function update_logstash_outputs() {
  # Generate updated JSON payload
  JSON_STRING=$(jq -n --arg UPDATEDLIST $NEW_LIST_JSON '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
  if logstash_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_logstash" --retry 3 --retry-delay 10 --fail 2>/dev/null); then
    SSL_CONFIG=$(echo "$logstash_policy" | jq -r '.item.ssl')
    LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
    LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
    LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
    # Revert escaped \\n to \n for jq
    LOGSTASH_PILLAR_CONFIG_YAML=$(printf '%b' "$LOGSTASH_PILLAR_CONFIG_YAML")

    if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Reuse existing secret
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
          --argjson SECRETS "$SECRETS" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG,"secrets": $SECRETS}')
      else
        # Update certs, creating new secret
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
          --arg LOGSTASHKEY "$LOGSTASHKEY" \
          --arg LOGSTASHCRT "$LOGSTASHCRT" \
          --arg LOGSTASHCA "$LOGSTASHCA" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}')
      fi
    else
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Reuse existing ssl config
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG}')
      else
        # Update ssl config
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \
          --arg LOGSTASHKEY "$LOGSTASHKEY" \
          --arg LOGSTASHCRT "$LOGSTASHCRT" \
          --arg LOGSTASHCA "$LOGSTASHCA" \
          '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}')
      fi
    fi
  fi

  # Update Logstash Outputs
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
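The branching above hinges on `jq -e`, whose exit status reflects whether the filter produced a non-null, non-false value; that is what distinguishes Fleet installs with output secrets enabled from those without. A minimal sketch of the idiom ($policy_json is a placeholder variable):

    # jq -e: exit 0 only when .item.secrets exists and is non-null
    if SECRETS=$(echo "$policy_json" | jq -er '.item.secrets' 2>/dev/null); then
      echo "output secrets enabled"
    else
      echo "no secrets on this output"
    fi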
function update_kafka_outputs() {
  # Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
  SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')

  JSON_STRING=$(jq -n \
    --arg UPDATEDLIST "$NEW_LIST_JSON" \
    --argjson SSL_CONFIG "$SSL_CONFIG" \
    '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
  # Update Kafka outputs
  curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
  if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
    SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl')
    KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
    KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
    KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
    if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Update policy when fleet has secrets enabled
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          --argjson SECRETS "$SECRETS" \
          '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}')
      else
        # Update certs, creating new secret
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg KAFKAKEY "$KAFKAKEY" \
          --arg KAFKACRT "$KAFKACRT" \
          --arg KAFKACA "$KAFKACA" \
          '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": {"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"secrets": {"ssl":{"key": $KAFKAKEY }}}')
      fi
    else
      if [[ "$UPDATE_CERTS" != "true" ]]; then
        # Update policy when fleet has secrets disabled or policy hasn't been force updated
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --argjson SSL_CONFIG "$SSL_CONFIG" \
          '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
      else
        # Update ssl config
        JSON_STRING=$(jq -n \
          --arg UPDATEDLIST "$NEW_LIST_JSON" \
          --arg KAFKAKEY "$KAFKAKEY" \
          --arg KAFKACRT "$KAFKACRT" \
          --arg KAFKACA "$KAFKACA" \
          '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }}')
      fi
    fi
    # Update Kafka outputs
    curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
  else
    printf "Failed to get current Kafka output policy..."
    exit 1
  fi
}
{% if GLOBALS.pipeline == "KAFKA" %}
|
||||
# Get current list of Kafka Outputs
|
||||
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
|
||||
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka' --retry 3 --retry-delay 30 --fail 2>/dev/null)
|
||||
|
||||
# Check to make sure that the server responded with good data - else, bail from script
|
||||
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
|
||||
@@ -46,7 +153,7 @@ function update_kafka_outputs() {
|
||||
|
||||
# Get the current list of kafka outputs & hash them
|
||||
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
|
||||
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
|
||||
CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}')
|
||||
|
||||
declare -a NEW_LIST=()
|
||||
|
||||
@@ -61,7 +168,7 @@ function update_kafka_outputs() {
|
||||
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
|
||||
{% else %}
|
||||
# Get current list of Logstash Outputs
|
||||
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
|
||||
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash' --retry 3 --retry-delay 30 --fail 2>/dev/null)
|
||||
|
||||
# Check to make sure that the server responded with good data - else, bail from script
|
||||
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
|
||||
@@ -69,10 +176,19 @@ function update_kafka_outputs() {
  printf "Failed to query for current Logstash Outputs..."
  exit 1
fi
# logstash adv config - compare pillar to last state file value
if [[ -f "$LOGSTASH_PILLAR_STATE_FILE" ]]; then
  PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML=$(cat "$LOGSTASH_PILLAR_STATE_FILE")
  if [[ "$LOGSTASH_PILLAR_CONFIG_YAML" != "$PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML" ]]; then
    echo "Logstash pillar config has changed - forcing update"
    FORCE_UPDATE=true
  fi
  echo "$LOGSTASH_PILLAR_CONFIG_YAML" > "$LOGSTASH_PILLAR_STATE_FILE"
fi

# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}')

declare -a NEW_LIST=()
@@ -121,10 +237,10 @@ function update_kafka_outputs() {

# Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}')

# Compare the current & new list of outputs - if different, update the Logstash outputs
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then
  printf "\nHashes match - no update needed.\n"
  printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
@@ -10,8 +10,16 @@

{%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Setting up {{ PACKAGE }} package..."
VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}")
elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
if VERSION=$(elastic_fleet_package_version_check "{{ PACKAGE }}"); then
  if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then
    # packages loaded by this script should never fail to install and are REQUIRED before an installation of SO can be considered successful
    echo -e "\nERROR: Failed to install default integration package -- $PACKAGE $VERSION"
    exit 1
  fi
else
  echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
  exit 1
fi
echo
{%- endfor %}
echo
@@ -10,8 +10,15 @@

{%- for PACKAGE in SUPPORTED_PACKAGES %}
echo "Upgrading {{ PACKAGE }} package..."
VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}")
elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"
if VERSION=$(elastic_fleet_package_latest_version_check "{{ PACKAGE }}"); then
  if ! elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION"; then
    # exit 1 on failure to upgrade a default package, allow salt to handle retries
    echo -e "\nERROR: Failed to upgrade $PACKAGE to version: $VERSION"
    exit 1
  fi
else
  echo -e "\nERROR: Failed to get version information for integration $PACKAGE"
fi
echo
{%- endfor %}
echo
@@ -23,18 +23,17 @@ if [[ "$RETURN_CODE" != "0" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
ALIASES=".fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
|
||||
for ALIAS in ${ALIASES}
|
||||
do
|
||||
ALIASES=(.fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest)
|
||||
for ALIAS in "${ALIASES[@]}"; do
|
||||
# Get all concrete indices from alias
|
||||
INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')
|
||||
|
||||
# Delete all resolved indices
|
||||
for INDX in ${INDXS}
|
||||
do
|
||||
if INDXS_RAW=$(curl -sK /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then
|
||||
INDXS=$(echo "$INDXS_RAW" | jq -r '.aliases[].indices[]')
|
||||
# Delete all resolved indices
|
||||
for INDX in ${INDXS}; do
|
||||
status "Deleting $INDX"
|
||||
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
|
||||
done
|
||||
done
|
||||
fi
|
||||
done
|
||||
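Before letting this loop delete anything, the resolve call can be exercised read-only; a quick sketch for a single alias, using the same endpoint the loop hits:

    # Preview which concrete indices an alias resolves to (no deletes)
    curl -sK /opt/so/conf/kibana/curl.config -k -L \
      "https://localhost:9200/_resolve/index/.fleet-agents" | jq -r '.aliases[].indices[]'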

# Restarting Kibana...
@@ -51,22 +50,61 @@ if [[ "$RETURN_CODE" != "0" ]]; then
|
||||
fi
|
||||
|
||||
printf "\n### Create ES Token ###\n"
|
||||
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
|
||||
if ESTOKEN_RAW=$(fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
|
||||
ESTOKEN=$(echo "$ESTOKEN_RAW" | jq -r .value)
|
||||
else
|
||||
echo -e "\nFailed to create ES token..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
### Create Outputs, Fleet Policy and Fleet URLs ###
|
||||
# Create the Manager Elasticsearch Output first and set it as the default output
|
||||
printf "\nAdd Manager Elasticsearch Output...\n"
|
||||
ESCACRT=$(openssl x509 -in $INTCA)
|
||||
JSON_STRING=$( jq -n \
|
||||
--arg ESCACRT "$ESCACRT" \
|
||||
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl":{"certificate_authorities": [$ESCACRT]}}' )
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
||||
ESCACRT=$(openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
|
||||
JSON_STRING=$(jq -n \
|
||||
--arg ESCACRT "$ESCACRT" \
|
||||
'{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}')
|
||||
|
||||
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||
echo -e "\nFailed to create so-elasticsearch_manager policy..."
|
||||
exit 1
|
||||
fi
|
||||
printf "\n\n"
|
||||
|
||||
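The ca_trusted_fingerprint value computed above is the SHA-256 of the CA certificate in DER form, as upper-case hex. openssl's own fingerprint output gives an equivalent cross-check, assuming $INTCA points at the grid's intermediate CA as elsewhere in this diff:

    # Should print the same value the setup script computes above (colons stripped)
    openssl x509 -in /etc/pki/tls/certs/intca.crt -noout -fingerprint -sha256 \
      | cut -d= -f2 | tr -d ':'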
# so-manager_elasticsearch should exist and be disabled. Now update it before checking that it's the only default policy
MANAGER_OUTPUT_ENABLED=$(echo "$JSON_STRING" | jq 'del(.id) | .is_default = true | .is_default_monitoring = true')
if ! curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$MANAGER_OUTPUT_ENABLED"; then
  echo -e "\nFailed to update so-manager_elasticsearch"
  exit 1
fi

# At this point there should only be two policies: fleet-default-output & so-manager_elasticsearch
status "Verifying so-manager_elasticsearch policy is configured as the current default"

# Grab the fleet-default-output policy instead of so-manager_elasticsearch, because a weird state can exist where both fleet-default-output & so-manager_elasticsearch are set as the active default output for logs / metrics, resulting in logs not ingesting on import/eval nodes
if DEFAULTPOLICY=$(fleet_api "outputs/fleet-default-output"); then
  fleet_default=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default')
  fleet_default_monitoring=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default_monitoring')
  # Check that fleet-default-output isn't configured as a default for anything (both variables return false)
  if [[ $fleet_default == "false" ]] && [[ $fleet_default_monitoring == "false" ]]; then
    echo -e "\nso-manager_elasticsearch is configured as the current default policy..."
  else
    echo -e "\nVerification of so-manager_elasticsearch policy failed... The default 'fleet-default-output' output is still active..."
    exit 1
  fi
else
  # fleet-default-output is created automatically by Fleet when started and should always exist on any installation type
  echo -e "\nDefault fleet-default-output policy doesn't exist...\n"
  exit 1
fi

# Create the Manager Fleet Server Host Agent Policy
# This has to be done while the Elasticsearch Output is set to the default Output
printf "Create Manager Fleet Server Policy...\n"
elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"
if ! elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"; then
  echo -e "\nFailed to create Manager Fleet Server policy..."
  exit 1
fi

# Modify the default integration policy to update the policy_id with the correct naming
UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" --arg name "fleet_server-{{ GLOBALS.hostname }}" '
@@ -74,7 +112,10 @@ UPDATED_INTEGRATION_POLICY=$(jq --arg policy_id "FleetServer_{{ GLOBALS.hostname
|
||||
.name = $name' /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json)
|
||||
|
||||
# Add the Fleet Server Integration to the new Fleet Policy
|
||||
elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"
|
||||
if ! elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"; then
|
||||
echo -e "\nFailed to create Fleet server integration for Manager.."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Now we can create the Logstash Output and set it to to be the default Output
|
||||
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
|
||||
@@ -86,9 +127,12 @@ JSON_STRING=$( jq -n \
|
||||
--arg LOGSTASHCRT "$LOGSTASHCRT" \
|
||||
--arg LOGSTASHKEY "$LOGSTASHKEY" \
|
||||
--arg LOGSTASHCA "$LOGSTASHCA" \
|
||||
'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]},"proxy_id":null}'
|
||||
'{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}'
|
||||
)
|
||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
||||
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||
echo -e "\nFailed to create logstash fleet output"
|
||||
exit 1
|
||||
fi
|
||||
printf "\n\n"
|
||||
{%- endif %}
|
||||
|
||||
@@ -106,7 +150,10 @@ else
fi

## This array replaces whatever URLs are currently configured
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/fleet_server_hosts" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "fleet_server_hosts" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to add manager fleet URL"
  exit 1
fi
printf "\n\n"

### Create Policies & Associated Integration Configuration ###
@@ -117,13 +164,22 @@ printf "\n\n"
|
||||
/usr/sbin/so-elasticsearch-templates-load
|
||||
|
||||
# Initial Endpoints Policy
|
||||
elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"
|
||||
if ! elastic_fleet_policy_create "endpoints-initial" "Initial Endpoint Policy" "false" "1209600"; then
|
||||
echo -e "\nFailed to create endpoints-initial policy..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Grid Nodes - General Policy
|
||||
elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"
|
||||
if ! elastic_fleet_policy_create "so-grid-nodes_general" "SO Grid Nodes - General Purpose" "false" "1209600"; then
|
||||
echo -e "\nFailed to create so-grid-nodes_general policy..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Grid Nodes - Heavy Node Policy
|
||||
elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"
|
||||
if ! elastic_fleet_policy_create "so-grid-nodes_heavy" "SO Grid Nodes - Heavy Node" "false" "1209600"; then
|
||||
echo -e "\nFailed to create so-grid-nodes_heavy policy..."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Load Integrations for default policies
|
||||
so-elastic-fleet-integration-policy-load
|
||||
@@ -135,14 +191,34 @@ JSON_STRING=$( jq -n \
  '{"name":$NAME,"host":$URL,"is_default":true}'
)

curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/agent_download_sources" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
if ! fleet_api "agent_download_sources" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to update Elastic Agent artifact URL"
  exit 1
fi

### Finalization ###

# Query for Enrollment Tokens for default policies
ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
GRIDNODESENROLLMENTOKENGENERAL=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
GRIDNODESENROLLMENTOKENHEAVY=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
if ENDPOINTSENROLLMENTOKEN_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  ENDPOINTSENROLLMENTOKEN=$(echo "$ENDPOINTSENROLLMENTOKEN_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
else
  echo -e "\nFailed to query for Endpoints enrollment token"
  exit 1
fi

if GRIDNODESENROLLMENTOKENGENERAL_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  GRIDNODESENROLLMENTOKENGENERAL=$(echo "$GRIDNODESENROLLMENTOKENGENERAL_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
else
  echo -e "\nFailed to query for Grid nodes - General enrollment token"
  exit 1
fi

if GRIDNODESENROLLMENTOKENHEAVY_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  GRIDNODESENROLLMENTOKENHEAVY=$(echo "$GRIDNODESENROLLMENTOKENHEAVY_RAW" | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
else
  echo -e "\nFailed to query for Grid nodes - Heavy enrollment token"
  exit 1
fi
|
||||
# Store needed data in minion pillar
|
||||
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
|
||||
@@ -165,9 +241,11 @@ printf '%s\n'\
|
||||
"" >> "$global_pillar_file"
|
||||
|
||||
# Call Elastic-Fleet Salt State
|
||||
printf "\nApplying elasticfleet state"
|
||||
salt-call state.apply elasticfleet queue=True
|
||||
|
||||
# Generate installers & install Elastic Agent on the node
|
||||
so-elastic-agent-gen-installers
|
||||
printf "\nApplying elasticfleet.install_agent_grid state"
|
||||
salt-call state.apply elasticfleet.install_agent_grid queue=True
|
||||
exit 0
|
||||
|
||||
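Note: the refactor above routes every Kibana Fleet call through a fleet_api helper instead of repeating raw curl invocations. The helper itself is defined outside this hunk; the following is only a hedged sketch of the shape such a wrapper could take, reusing the curl.config and retry/fail idioms visible in this diff. The name, flags, and defaults here are assumptions, not the committed implementation.

    # Hypothetical sketch of a fleet_api-style wrapper (illustrative only).
    fleet_api() {
      local endpoint=$1
      shift
      # --fail turns HTTP >= 400 into a non-zero exit so "if ! fleet_api ..." guards work;
      # the retries smooth over a Kibana that is still coming up during setup.
      curl -sK /opt/so/conf/elasticsearch/curl.config -L \
        --retry 3 --retry-delay 30 --fail \
        "http://localhost:5601/api/fleet/${endpoint}" "$@"
    }

Returning curl's exit status unchanged is what lets the callers above branch both on "if ! fleet_api ..." and on "if VAR=$(fleet_api ...)".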
@@ -23,7 +23,7 @@ function update_fleet_urls() {
}

# Get current list of Fleet Server URLs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default')
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' --retry 3 --retry-delay 30 --fail 2>/dev/null)

# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
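The added flags change the failure semantics: --fail converts an HTTP error into a non-zero curl exit code, so an error body is caught instead of being parsed as JSON, and the retries tolerate a Kibana that is momentarily unavailable. A quick illustration of the exit-code difference (the endpoint name here is made up, only the semantics matter):

    curl -s  'http://localhost:5601/api/fleet/no_such_endpoint' >/dev/null; echo $?   # 0: a 404 still counts as success
    curl -sf 'http://localhost:5601/api/fleet/no_such_endpoint' >/dev/null; echo $?   # 22: --fail maps HTTP >= 400 to exit 22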
@@ -5,46 +5,78 @@
# Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch', 'so-managerhype'] %}

. /usr/sbin/so-common

force=false
while [[ $# -gt 0 ]]; do
  case $1 in
    -f|--force)
      force=true
      shift
      ;;
    *)
      echo "Unknown option $1"
      echo "Usage: $0 [-f|--force]"
      exit 1
      ;;
  esac
done

# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?

if [[ "$RETURN_CODE" != "0" ]]; then
  printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
  exit 1
  echo -e "\nKibana API not accessible, can't setup Elastic Fleet output policy for Kafka...\n"
  exit 1
fi

output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"

if ! echo "$output" | grep -q "so-manager_kafka"; then
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"
if ! kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then
  # Create a new output policy for Kafka. Default is disabled 'is_default: false & is_default_monitoring: false'
  JSON_STRING=$( jq -n \
    --arg KAFKACRT "$KAFKACRT" \
    --arg KAFKAKEY "$KAFKAKEY" \
    --arg KAFKACA "$KAFKACA" \
    --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
    --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
    '{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 10 }, "topics":[{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
  )
  curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
  refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)

  if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
    echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
    --arg KAFKACRT "$KAFKACRT" \
    --arg KAFKAKEY "$KAFKAKEY" \
    --arg KAFKACA "$KAFKACA" \
    --arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
    --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
    '{"name":"grid-kafka", "id":"so-manager_kafka","type":"kafka","hosts":[ $MANAGER_IP ],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
  )
  if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
    echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
    exit 1
  else
    echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
    exit 0
  fi
elif kafka_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null) && [[ "$force" == "true" ]]; then
  # force an update to Kafka policy. Keep the current value of Kafka output policy (enabled/disabled).
  ENABLED_DISABLED=$(echo "$kafka_output" | jq -e .item.is_default)
  HOSTS=$(echo "$kafka_output" | jq -r '.item.hosts')
  JSON_STRING=$( jq -n \
    --arg KAFKACRT "$KAFKACRT" \
    --arg KAFKAKEY "$KAFKAKEY" \
    --arg KAFKACA "$KAFKACA" \
    --arg ENABLED_DISABLED "$ENABLED_DISABLED" \
    --arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
    --argjson HOSTS "$HOSTS" \
    '{"name":"grid-kafka","type":"kafka","hosts":$HOSTS,"is_default":$ENABLED_DISABLED,"is_default_monitoring":$ENABLED_DISABLED,"config_yaml":"","ssl":{"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"proxy_id":null,"client_id":"Elastic","version": $KAFKA_OUTPUT_VERSION ,"compression":"none","auth_type":"ssl","partition":"round_robin","round_robin":{"group_events":10},"topic":"default-securityonion","headers":[{"key":"","value":""}],"timeout":30,"broker_timeout":30,"required_acks":1,"secrets":{"ssl":{"key": $KAFKAKEY }}}'
  )
  if ! response=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" --fail 2>/dev/null); then
    echo -e "\nFailed to force update to Elastic Fleet output policy for Kafka...\n"
    exit 1
  elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
    echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
  else
    echo -e "\nForced update to Elastic Fleet output policy for Kafka...\n"
  fi

elif echo "$output" | grep -q "so-manager_kafka"; then
else
  echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi
{% else %}
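Whichever branch runs, the result can be verified the same way the script discovers state, by listing Fleet outputs and checking for the so-manager_kafka id. A read-only check along these lines, assuming the same curl.config credentials:

    # Confirm the Kafka output exists among the configured Fleet outputs:
    curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r '.items[].id'
    # Inspect the output itself, e.g. whether it is currently the default:
    curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq '.item.is_default'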
@@ -26,14 +26,14 @@ catrustscript:
        GLOBALS: {{ GLOBALS }}
{% endif %}

cacertz:
elasticsearch_cacerts:
  file.managed:
    - name: /opt/so/conf/ca/cacerts
    - source: salt://elasticsearch/cacerts
    - user: 939
    - group: 939

capemz:
elasticsearch_capems:
  file.managed:
    - name: /opt/so/conf/ca/tls-ca-bundle.pem
    - source: salt://elasticsearch/tls-ca-bundle.pem
@@ -5,11 +5,6 @@

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

include:
  - ssl
  - elasticsearch.ca

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}

@@ -1,6 +1,6 @@
elasticsearch:
  enabled: false
  version: 8.18.4
  version: 9.0.8
  index_clean: true
  config:
    action:

@@ -72,6 +72,8 @@ elasticsearch:
      actions:
        set_priority:
          priority: 0
        allocate:
          number_of_replicas: ""
      min_age: 60d
    delete:
      actions:

@@ -84,11 +86,25 @@ elasticsearch:
          max_primary_shard_size: 50gb
        set_priority:
          priority: 100
        forcemerge:
          max_num_segments: ""
        shrink:
          max_primary_shard_size: ""
          method: COUNT
          number_of_shards: ""
      min_age: 0ms
    warm:
      actions:
        set_priority:
          priority: 50
        forcemerge:
          max_num_segments: ""
        shrink:
          max_primary_shard_size: ""
          method: COUNT
          number_of_shards: ""
        allocate:
          number_of_replicas: ""
      min_age: 30d
so-case:
  index_sorting: false

@@ -245,7 +261,6 @@ elasticsearch:
        set_priority:
          priority: 50
      min_age: 30d
  warm: 7
so-detection:
  index_sorting: false
  index_template:

@@ -284,6 +299,99 @@ elasticsearch:
      hot:
        actions: {}
        min_age: 0ms
sos-backup:
  index_sorting: false
  index_template:
    composed_of: []
    ignore_missing_component_templates: []
    index_patterns:
      - sos-backup-*
    priority: 501
    template:
      settings:
        index:
          number_of_replicas: 0
          number_of_shards: 1
so-assistant-chat:
  index_sorting: false
  index_template:
    composed_of:
      - assistant-chat-mappings
      - assistant-chat-settings
    data_stream:
      allow_custom_routing: false
      hidden: false
    ignore_missing_component_templates: []
    index_patterns:
      - so-assistant-chat*
    priority: 501
    template:
      mappings:
        date_detection: false
        dynamic_templates:
          - strings_as_keyword:
              mapping:
                ignore_above: 1024
                type: keyword
              match_mapping_type: string
      settings:
        index:
          lifecycle:
            name: so-assistant-chat-logs
          mapping:
            total_fields:
              limit: 1500
          number_of_replicas: 0
          number_of_shards: 1
          refresh_interval: 1s
          sort:
            field: '@timestamp'
            order: desc
  policy:
    phases:
      hot:
        actions: {}
        min_age: 0ms
so-assistant-session:
  index_sorting: false
  index_template:
    composed_of:
      - assistant-session-mappings
      - assistant-session-settings
    data_stream:
      allow_custom_routing: false
      hidden: false
    ignore_missing_component_templates: []
    index_patterns:
      - so-assistant-session*
    priority: 501
    template:
      mappings:
        date_detection: false
        dynamic_templates:
          - strings_as_keyword:
              mapping:
                ignore_above: 1024
                type: keyword
              match_mapping_type: string
      settings:
        index:
          lifecycle:
            name: so-assistant-session-logs
          mapping:
            total_fields:
              limit: 1500
          number_of_replicas: 0
          number_of_shards: 1
          refresh_interval: 1s
          sort:
            field: '@timestamp'
            order: desc
  policy:
    phases:
      hot:
        actions: {}
        min_age: 0ms
so-endgame:
  index_sorting: false
  index_template:

@@ -504,7 +612,6 @@ elasticsearch:
        set_priority:
          priority: 50
      min_age: 30d
  warm: 7
so-import:
  index_sorting: false
  index_template:

@@ -750,53 +857,11 @@ elasticsearch:
    composed_of:
      - agent-mappings
      - dtc-agent-mappings
      - base-mappings
      - dtc-base-mappings
      - client-mappings
      - dtc-client-mappings
      - container-mappings
      - destination-mappings
      - dtc-destination-mappings
      - pb-override-destination-mappings
      - dll-mappings
      - dns-mappings
      - dtc-dns-mappings
      - ecs-mappings
      - dtc-ecs-mappings
      - error-mappings
      - event-mappings
      - dtc-event-mappings
      - file-mappings
      - dtc-file-mappings
      - group-mappings
      - host-mappings
      - dtc-host-mappings
      - http-mappings
      - dtc-http-mappings
      - log-mappings
      - metadata-mappings
      - network-mappings
      - dtc-network-mappings
      - observer-mappings
      - dtc-observer-mappings
      - organization-mappings
      - package-mappings
      - process-mappings
      - dtc-process-mappings
      - related-mappings
      - rule-mappings
      - dtc-rule-mappings
      - server-mappings
      - service-mappings
      - dtc-service-mappings
      - source-mappings
      - dtc-source-mappings
      - pb-override-source-mappings
      - threat-mappings
      - tls-mappings
      - url-mappings
      - user_agent-mappings
      - dtc-user_agent-mappings
      - common-settings
      - common-dynamic-mappings
    data_stream:

@@ -852,7 +917,6 @@ elasticsearch:
        set_priority:
          priority: 50
      min_age: 30d
  warm: 7
so-hydra:
  close: 30
  delete: 365

@@ -963,7 +1027,6 @@ elasticsearch:
        set_priority:
          priority: 50
      min_age: 30d
  warm: 7
so-lists:
  index_sorting: false
  index_template:

@@ -1047,6 +1110,8 @@ elasticsearch:
      actions:
        set_priority:
          priority: 0
        allocate:
          number_of_replicas: ""
      min_age: 60d
    delete:
      actions:

@@ -1059,11 +1124,25 @@ elasticsearch:
          max_primary_shard_size: 50gb
        set_priority:
          priority: 100
        forcemerge:
          max_num_segments: ""
        shrink:
          max_primary_shard_size: ""
          method: COUNT
          number_of_shards: ""
      min_age: 0ms
    warm:
      actions:
        set_priority:
          priority: 50
        allocate:
          number_of_replicas: ""
        forcemerge:
          max_num_segments: ""
        shrink:
          max_primary_shard_size: ""
          method: COUNT
          number_of_shards: ""
      min_age: 30d
so-logs-detections_x_alerts:
  index_sorting: false

@@ -1243,6 +1322,68 @@ elasticsearch:
        set_priority:
          priority: 50
      min_age: 30d
so-elastic-agent-monitor:
  index_sorting: false
  index_template:
    composed_of:
      - event-mappings
      - so-elastic-agent-monitor
      - so-fleet_integrations.ip_mappings-1
      - so-fleet_globals-1
      - so-fleet_agent_id_verification-1
    data_stream:
      allow_custom_routing: false
      hidden: false
    index_patterns:
      - logs-agentmonitor-*
    priority: 501
    template:
      mappings:
        _meta:
          managed: true
          managed_by: security_onion
          package:
            name: elastic_agent
      settings:
        index:
          lifecycle:
            name: so-elastic-agent-monitor-logs
          mapping:
            total_fields:
              limit: 5000
          number_of_replicas: 0
          sort:
            field: '@timestamp'
            order: desc
  policy:
    _meta:
      managed: true
      managed_by: security_onion
      package:
        name: elastic_agent
    phases:
      cold:
        actions:
          set_priority:
            priority: 0
        min_age: 60d
      delete:
        actions:
          delete: {}
        min_age: 365d
      hot:
        actions:
          rollover:
            max_age: 30d
            max_primary_shard_size: 50gb
          set_priority:
            priority: 100
        min_age: 0ms
      warm:
        actions:
          set_priority:
            priority: 50
        min_age: 30d
so-logs-elastic_agent_x_apm_server:
  index_sorting: false
  index_template:

@@ -1849,6 +1990,70 @@ elasticsearch:
        set_priority:
          priority: 50
      min_age: 30d
so-logs-elasticsearch_x_server:
  index_sorting: false
  index_template:
    composed_of:
      - logs-elasticsearch.server@package
      - logs-elasticsearch.server@custom
      - so-fleet_integrations.ip_mappings-1
      - so-fleet_globals-1
      - so-fleet_agent_id_verification-1
    data_stream:
      allow_custom_routing: false
      hidden: false
    ignore_missing_component_templates:
      - logs-elasticsearch.server@custom
    index_patterns:
      - logs-elasticsearch.server-*
    priority: 501
    template:
      mappings:
        _meta:
          managed: true
          managed_by: security_onion
          package:
            name: elastic_agent
      settings:
        index:
          lifecycle:
            name: so-logs-elasticsearch.server-logs
          mapping:
            total_fields:
              limit: 5000
          number_of_replicas: 0
          sort:
            field: '@timestamp'
            order: desc
  policy:
    _meta:
      managed: true
      managed_by: security_onion
      package:
        name: elastic_agent
    phases:
      cold:
        actions:
          set_priority:
            priority: 0
        min_age: 60d
      delete:
        actions:
          delete: {}
        min_age: 365d
      hot:
        actions:
          rollover:
            max_age: 30d
            max_primary_shard_size: 50gb
          set_priority:
            priority: 100
        min_age: 0ms
      warm:
        actions:
          set_priority:
            priority: 50
        min_age: 30d
so-logs-endpoint_x_actions:
  index_sorting: false
  index_template:

@@ -2917,7 +3122,6 @@ elasticsearch:
        set_priority:
          priority: 50
      min_age: 30d
  warm: 7
so-logs-system_x_application:
  index_sorting: false
  index_template:

@@ -4031,7 +4235,7 @@ elasticsearch:
      hot:
        actions:
          rollover:
            max_age: 1d
            max_age: 30d
            max_primary_shard_size: 50gb
          set_priority:
            priority: 100
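These pillar trees feed the ILM policies and index templates the grid loads into Elasticsearch; each policy subtree mirrors the JSON body of the _ilm/policy API. As a hedged illustration of that mapping only (the policy name and concrete values below are invented for the example, and endpoint/auth details vary by grid; the real loader fills values from the pillar and drops empty-string placeholders):

    # Illustrative only: hand-rolled equivalent of a pillar policy subtree.
    curl -sK /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:9200/_ilm/policy/so-example-logs" \
      -H 'Content-Type: application/json' -d '
    {
      "policy": {
        "phases": {
          "hot": {
            "min_age": "0ms",
            "actions": {
              "rollover": { "max_age": "30d", "max_primary_shard_size": "50gb" },
              "set_priority": { "priority": 100 }
            }
          },
          "warm": {
            "min_age": "30d",
            "actions": { "set_priority": { "priority": 50 } }
          }
        }
      }
    }'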
@@ -14,6 +14,9 @@
{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}

include:
  - ca
  - elasticsearch.ca
  - elasticsearch.ssl
  - elasticsearch.config
  - elasticsearch.sostatus

@@ -61,11 +64,7 @@ so-elasticsearch:
      - /nsm/elasticsearch:/usr/share/elasticsearch/data:rw
      - /opt/so/log/elasticsearch:/var/log/elasticsearch:rw
      - /opt/so/conf/ca/cacerts:/usr/share/elasticsearch/jdk/lib/security/cacerts:ro
{% if GLOBALS.is_manager %}
      - /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% else %}
      - /etc/pki/tls/certs/intca.crt:/usr/share/elasticsearch/config/ca.crt:ro
{% endif %}
      - /etc/pki/elasticsearch.crt:/usr/share/elasticsearch/config/elasticsearch.crt:ro
      - /etc/pki/elasticsearch.key:/usr/share/elasticsearch/config/elasticsearch.key:ro
      - /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro

@@ -82,22 +81,21 @@ so-elasticsearch:
{% endfor %}
{% endif %}
    - watch:
      - file: cacertz
      - file: trusttheca
      - x509: elasticsearch_crt
      - x509: elasticsearch_key
      - file: elasticsearch_cacerts
      - file: esyml
    - require:
      - file: trusttheca
      - x509: elasticsearch_crt
      - x509: elasticsearch_key
      - file: elasticsearch_cacerts
      - file: esyml
      - file: eslog4jfile
      - file: nsmesdir
      - file: eslogdir
      - file: cacertz
      - x509: /etc/pki/elasticsearch.crt
      - x509: /etc/pki/elasticsearch.key
      - file: elasticp12perms
{% if GLOBALS.is_manager %}
      - x509: pki_public_ca_crt
{% else %}
      - x509: trusttheca
{% endif %}
      - cmd: auth_users_roles_inode
      - cmd: auth_users_inode

salt/elasticsearch/files/ingest/common.ip_validation (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
{
|
||||
"processors": [
|
||||
{
|
||||
"convert": {
|
||||
"field": "_ingest._value",
|
||||
"type": "ip",
|
||||
"target_field": "_ingest._temp_ip",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "temp._valid_ips",
|
||||
"allow_duplicates": false,
|
||||
"value": [
|
||||
"{{{_ingest._temp_ip}}}"
|
||||
],
|
||||
"ignore_failure": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
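Because _ingest._value only exists inside a foreach, this pipeline is easiest to exercise through the simulate API with a wrapping foreach, mirroring how its callers use it. A hedged example (document values are illustrative; assumes the stored pipeline has already been loaded):

    curl -sK /opt/so/conf/elasticsearch/curl.config -X POST "localhost:9200/_ingest/pipeline/_simulate" \
      -H 'Content-Type: application/json' -d '
    {
      "pipeline": {
        "processors": [
          { "foreach": { "field": "dns.answers.name",
                         "processor": { "pipeline": { "name": "common.ip_validation" } } } }
        ]
      },
      "docs": [ { "_source": { "dns": { "answers": { "name": [ "example.com", "10.0.0.5", "2001:db8::1" ] } } } } ]
    }'
    # Expected: temp._valid_ips collects the two convertible addresses; "example.com" fails
    # the ip conversion, and downstream pipelines strip the empty entry it leaves behind.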
salt/elasticsearch/files/ingest/elasticagent.monitor (new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
{
|
||||
"processors": [
|
||||
{
|
||||
"set": {
|
||||
"field": "event.dataset",
|
||||
"value": "gridmetrics.agents",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "event.module",
|
||||
"value": "gridmetrics",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": [
|
||||
"host",
|
||||
"elastic_agent",
|
||||
"agent"
|
||||
],
|
||||
"ignore_missing": true,
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"json": {
|
||||
"field": "message",
|
||||
"add_to_root": true,
|
||||
"ignore_failure": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -23,8 +23,9 @@
    { "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
    { "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
    { "set": { "if": "ctx?.metadata?.kafka != null", "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
    { "set": { "if": "ctx.event?.dataset != null && ctx.event?.dataset == 'elasticsearch.server'", "field": "event.module", "value": "elasticsearch" } },
    { "append": { "field": "related.ip", "value": ["{{source.ip}}", "{{destination.ip}}"], "allow_duplicates": false, "if": "ctx?.event?.dataset == 'endpoint.events.network' && ctx?.source?.ip != null", "ignore_failure": true } },
    { "foreach": { "field": "host.ip", "processor": { "append": { "field": "related.ip", "value": "{{_ingest._value}}", "allow_duplicates": false } }, "if": "ctx?.event?.module == 'endpoint'", "description": "Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip" } },
    { "foreach": { "field": "host.ip", "processor": { "append": { "field": "related.ip", "value": "{{_ingest._value}}", "allow_duplicates": false } }, "if": "ctx?.event?.module == 'endpoint' && ctx?.host?.ip != null", "ignore_missing": true, "description": "Extract IPs from Elastic Agent events (host.ip) and adds them to related.ip" } },
    { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp", "datastream_dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
  ]
}

@@ -1,9 +1,90 @@
{
  "description" : "kratos",
  "processors" : [
    {"set":{"field":"audience","value":"access","override":false,"ignore_failure":true}},
    {"set":{"field":"event.dataset","ignore_empty_value":true,"ignore_failure":true,"value":"kratos.{{{audience}}}","media_type":"text/plain"}},
    {"set":{"field":"event.action","ignore_failure":true,"copy_from":"msg"}},
    { "pipeline": { "name": "common" } }
  ]
  "description": "kratos",
  "processors": [
    {
      "set": {
        "field": "audience",
        "value": "access",
        "override": false,
        "ignore_failure": true
      }
    },
    {
      "set": {
        "field": "event.dataset",
        "ignore_empty_value": true,
        "ignore_failure": true,
        "value": "kratos.{{{audience}}}",
        "media_type": "text/plain"
      }
    },
    {
      "set": {
        "field": "event.action",
        "ignore_failure": true,
        "copy_from": "msg"
      }
    },
    {
      "rename": {
        "field": "http_request",
        "target_field": "http.request",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http_response",
        "target_field": "http.response",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.path",
        "target_field": "http.uri",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.method",
        "target_field": "http.method",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.method",
        "target_field": "http.method",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.query",
        "target_field": "http.query",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "http.request.headers.user-agent",
        "target_field": "http.useragent",
        "ignore_failure": true,
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "name": "common"
      }
    }
  ]
}
@@ -107,61 +107,61 @@
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-firewall",
        "name": "logs-pfsense.log-1.23.1-firewall",
        "if": "ctx.event.provider == 'filterlog'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-openvpn",
        "name": "logs-pfsense.log-1.23.1-openvpn",
        "if": "ctx.event.provider == 'openvpn'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-ipsec",
        "name": "logs-pfsense.log-1.23.1-ipsec",
        "if": "ctx.event.provider == 'charon'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-dhcp",
        "name": "logs-pfsense.log-1.23.1-dhcp",
        "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-unbound",
        "name": "logs-pfsense.log-1.23.1-unbound",
        "if": "ctx.event.provider == 'unbound'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-haproxy",
        "name": "logs-pfsense.log-1.23.1-haproxy",
        "if": "ctx.event.provider == 'haproxy'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-php-fpm",
        "name": "logs-pfsense.log-1.23.1-php-fpm",
        "if": "ctx.event.provider == 'php-fpm'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-squid",
        "name": "logs-pfsense.log-1.23.1-squid",
        "if": "ctx.event.provider == 'squid'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-snort",
        "name": "logs-pfsense.log-1.23.1-snort",
        "if": "ctx.event.provider == 'snort'"
      }
    },
    {
      "pipeline": {
        "name": "logs-pfsense.log-1.23.0-suricata",
        "name": "logs-pfsense.log-1.23.1-suricata",
        "if": "ctx.event.provider == 'suricata'"
      }
    },
@@ -1,15 +1,79 @@
{
  "description" : "suricata.alert",
  "processors" : [
    { "set": { "if": "ctx.event?.imported != true", "field": "_index", "value": "logs-suricata.alerts-so" } },
    { "set": { "field": "tags", "value": "alert" } },
    { "rename": { "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
    { "rename": { "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
    { "rename": { "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } },
    { "rename": { "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } },
    { "rename": { "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } },
    { "rename": { "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } },
    { "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } },
    { "pipeline": { "name": "common.nids" } }
  ]
  "description": "suricata.alert",
  "processors": [
    {
      "set": {
        "if": "ctx.event?.imported != true",
        "field": "_index",
        "value": "logs-suricata.alerts-so"
      }
    },
    {
      "set": {
        "field": "tags",
        "value": "alert"
      }
    },
    {
      "rename": {
        "field": "message2.alert",
        "target_field": "rule",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature",
        "target_field": "rule.name",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.ref",
        "target_field": "rule.version",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature_id",
        "target_field": "rule.uuid",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "rule.signature_id",
        "target_field": "rule.signature",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.payload_printable",
        "target_field": "network.data.decoded",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "dissect": {
        "field": "rule.rule",
        "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}",
        "ignore_missing": true,
        "ignore_failure": true
      }
    },
    {
      "pipeline": {
        "name": "common.nids"
      }
    }
  ]
}
@@ -1,30 +1,155 @@
{
  "description" : "suricata.common",
  "processors" : [
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source", "ignore_failure": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
    { "rename": { "field": "message2.in_iface", "target_field": "observer.ingress.interface.name", "ignore_failure": true } },
    { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
    { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },
    { "rename": { "field": "message2.src_port", "target_field": "source.port", "ignore_failure": true } },
    { "rename": { "field": "message2.dest_ip", "target_field": "destination.ip", "ignore_failure": true } },
    { "rename": { "field": "message2.dest_port", "target_field": "destination.port", "ignore_failure": true } },
    { "rename": { "field": "message2.vlan", "target_field": "network.vlan.id", "ignore_failure": true } },
    { "rename": { "field": "message2.community_id", "target_field": "network.community_id", "ignore_missing": true } },
    { "rename": { "field": "message2.xff", "target_field": "xff.ip", "ignore_missing": true } },
    { "set": { "field": "event.dataset", "value": "{{ message2.event_type }}" } },
    { "set": { "field": "observer.name", "value": "{{agent.name}}" } },
    { "set": { "field": "event.ingested", "value": "{{@timestamp}}" } },
    { "date": { "field": "message2.timestamp", "target_field": "@timestamp", "formats": ["ISO8601", "UNIX"], "timezone": "UTC", "ignore_failure": true } },
    { "remove": { "field": "agent", "ignore_failure": true } },
    { "append": { "field": "related.ip", "value": ["{{source.ip}}", "{{destination.ip}}"], "allow_duplicates": false, "ignore_failure": true } },
    {
      "script": {
        "source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
        "ignore_failure": false
      }
    },
    { "pipeline": { "if": "ctx?.event?.dataset != null", "name": "suricata.{{event.dataset}}" } }
  ]
}
  "description": "suricata.common",
  "processors": [
    {
      "json": {
        "field": "message",
        "target_field": "message2",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.pkt_src",
        "target_field": "network.packet_source",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.proto",
        "target_field": "network.transport",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.in_iface",
        "target_field": "observer.ingress.interface.name",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.flow_id",
        "target_field": "log.id.uid",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.src_ip",
        "target_field": "source.ip",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.src_port",
        "target_field": "source.port",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.dest_ip",
        "target_field": "destination.ip",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.dest_port",
        "target_field": "destination.port",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.vlan",
        "target_field": "network.vlan.id",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.community_id",
        "target_field": "network.community_id",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.xff",
        "target_field": "xff.ip",
        "ignore_missing": true
      }
    },
    {
      "set": {
        "field": "event.dataset",
        "value": "{{ message2.event_type }}"
      }
    },
    {
      "set": {
        "field": "observer.name",
        "value": "{{agent.name}}"
      }
    },
    {
      "set": {
        "field": "event.ingested",
        "value": "{{@timestamp}}"
      }
    },
    {
      "date": {
        "field": "message2.timestamp",
        "target_field": "@timestamp",
        "formats": [
          "ISO8601",
          "UNIX"
        ],
        "timezone": "UTC",
        "ignore_failure": true
      }
    },
    {
      "remove": {
        "field": "agent",
        "ignore_failure": true
      }
    },
    {
      "append": {
        "field": "related.ip",
        "value": [
          "{{source.ip}}",
          "{{destination.ip}}"
        ],
        "allow_duplicates": false,
        "ignore_failure": true
      }
    },
    {
      "script": {
        "source": "boolean isPrivate(def ip) { if (ip == null) return false; int dot1 = ip.indexOf('.'); if (dot1 == -1) return false; int dot2 = ip.indexOf('.', dot1 + 1); if (dot2 == -1) return false; int first = Integer.parseInt(ip.substring(0, dot1)); if (first == 10) return true; if (first == 192 && ip.startsWith('168.', dot1 + 1)) return true; if (first == 172) { int second = Integer.parseInt(ip.substring(dot1 + 1, dot2)); return second >= 16 && second <= 31; } return false; } String[] fields = new String[] {\"source\", \"destination\"}; for (int i = 0; i < fields.length; i++) { def field = fields[i]; def ip = ctx[field]?.ip; if (ip != null) { if (ctx.network == null) ctx.network = new HashMap(); if (isPrivate(ip)) { if (ctx.network.private_ip == null) ctx.network.private_ip = new ArrayList(); if (!ctx.network.private_ip.contains(ip)) ctx.network.private_ip.add(ip); } else { if (ctx.network.public_ip == null) ctx.network.public_ip = new ArrayList(); if (!ctx.network.public_ip.contains(ip)) ctx.network.public_ip.add(ip); } } }",
        "ignore_failure": false
      }
    },
    {
      "rename": {
        "field": "message2.capture_file",
        "target_field": "suricata.capture_file",
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "if": "ctx?.event?.dataset != null",
        "name": "suricata.{{event.dataset}}"
      }
    }
  ]
}
@@ -1,21 +1,136 @@
{
  "description" : "suricata.dns",
  "processors" : [
    { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
    { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } },
    { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } },
    { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
    { "pipeline": { "name": "common" } }
  ]
}
  "description": "suricata.dns",
  "processors": [
    {
      "rename": {
        "field": "message2.proto",
        "target_field": "network.transport",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.app_proto",
        "target_field": "network.protocol",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.type",
        "target_field": "dns.query.type",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.tx_id",
        "target_field": "dns.tx_id",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.id",
        "target_field": "dns.id",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.version",
        "target_field": "dns.version",
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "name": "suricata.dnsv3",
        "ignore_missing_pipeline": true,
        "if": "ctx?.dns?.version != null && ctx?.dns?.version == 3",
        "ignore_failure": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.rrname",
        "target_field": "dns.query.name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.rrtype",
        "target_field": "dns.query.type_name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.flags",
        "target_field": "dns.flags",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.qr",
        "target_field": "dns.qr",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.rd",
        "target_field": "dns.recursion.desired",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.ra",
        "target_field": "dns.recursion.available",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.opcode",
        "target_field": "dns.opcode",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.rcode",
        "target_field": "dns.response.code_name",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.grouped.A",
        "target_field": "dns.answers.data",
        "ignore_missing": true
      }
    },
    {
      "rename": {
        "field": "message2.dns.grouped.CNAME",
        "target_field": "dns.answers.name",
        "ignore_missing": true
      }
    },
    {
      "pipeline": {
        "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')",
        "name": "dns.tld"
      }
    },
    {
      "pipeline": {
        "name": "common"
      }
    }
  ]
}
salt/elasticsearch/files/ingest/suricata.dnsv3 (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
{
|
||||
"processors": [
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.dns.queries",
|
||||
"target_field": "dns.queries",
|
||||
"ignore_missing": true,
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.name = ctx?.dns?.queries[0].rrname;\n}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.type_name = ctx?.dns?.queries[0].rrtype;\n}"
|
||||
}
|
||||
},
|
||||
{
|
||||
"foreach": {
|
||||
"field": "dns.queries",
|
||||
"processor": {
|
||||
"rename": {
|
||||
"field": "_ingest._value.rrname",
|
||||
"target_field": "_ingest._value.name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"foreach": {
|
||||
"field": "dns.queries",
|
||||
"processor": {
|
||||
"rename": {
|
||||
"field": "_ingest._value.rrtype",
|
||||
"target_field": "_ingest._value.type_name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "suricata.tld",
|
||||
"ignore_missing_pipeline": true,
|
||||
"if": "ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0",
|
||||
"ignore_failure": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
salt/elasticsearch/files/ingest/suricata.tld (new file, 52 lines)
@@ -0,0 +1,52 @@
|
||||
{
|
||||
"processors": [
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.top_level_domain = q.name.substring(q.name.lastIndexOf('.') + 1);\n }\n }\n}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.query_without_tld = q.name.substring(0, q.name.lastIndexOf('.'));\n }\n }\n}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.parent_domain = q.query_without_tld.substring(q.query_without_tld.lastIndexOf('.') + 1);\n }\n }\n}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.subdomain = q.query_without_tld.substring(0, q.query_without_tld.lastIndexOf('.'));\n }\n }\n}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null && q.top_level_domain != null) {\n q.highest_registered_domain = q.parent_domain + \".\" + q.top_level_domain;\n }\n }\n}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.subdomain != null) {\n q.subdomain_length = q.subdomain.length();\n }\n }\n}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null) {\n q.parent_domain_length = q.parent_domain.length();\n }\n }\n}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n q.remove('query_without_tld');\n }\n}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
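Each script above peels one label off the query name with lastIndexOf('.'), so for q.name = "mail.corp.example.com" the derived fields are top_level_domain=com, parent_domain=example, subdomain=mail.corp and highest_registered_domain=example.com, with query_without_tld used only as scratch space. The same decomposition expressed in shell, purely as a worked example:

    q="mail.corp.example.com"
    tld="${q##*.}"                  # com
    without_tld="${q%.*}"           # mail.corp.example
    parent="${without_tld##*.}"     # example
    subdomain="${without_tld%.*}"   # mail.corp
    hrd="${parent}.${tld}"          # example.com

Like the Painless version, this split is naive rather than public-suffix aware: a name under co.uk would yield parent_domain=co.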
salt/elasticsearch/files/ingest/zeek.analyzer (new file, 61 lines)
@@ -0,0 +1,61 @@
|
||||
{
|
||||
"description": "zeek.analyzer",
|
||||
"processors": [
|
||||
{
|
||||
"set": {
|
||||
"field": "event.dataset",
|
||||
"value": "analyzer"
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": [
|
||||
"host"
|
||||
],
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"json": {
|
||||
"field": "message",
|
||||
"target_field": "message2",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "network.protocol",
|
||||
"copy_from": "message2.analyzer_name",
|
||||
"ignore_empty_value": true,
|
||||
"if": "ctx?.message2?.analyzer_kind == 'protocol'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"field": "network.protocol",
|
||||
"ignore_empty_value": true,
|
||||
"if": "ctx?.message2?.analyzer_kind != 'protocol'",
|
||||
"copy_from": "message2.proto"
|
||||
}
|
||||
},
|
||||
{
|
||||
"lowercase": {
|
||||
"field": "network.protocol",
|
||||
"ignore_missing": true,
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.failure_reason",
|
||||
"target_field": "error.reason",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "zeek.common"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -1,32 +1,227 @@
|
||||
{
|
||||
"description" : "zeek.dns",
|
||||
"processors" : [
|
||||
{ "set": { "field": "event.dataset", "value": "dns" } },
|
||||
{ "remove": { "field": ["host"], "ignore_failure": true } },
|
||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
||||
{ "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.rcode", "target_field": "dns.response.code", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } },
|
||||
{ "script": { "lang": "painless", "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", "source": "def ips = []; for (item in ctx.dns.answers.name) { if (item =~ /^(?:[0-9]{1,3}\\.){3}[0-9]{1,3}$/ || item =~ /^([a-fA-F0-9:]+:+)+[a-fA-F0-9]+$/) { ips.add(item); } } ctx.dns.resolved_ip = ips;" } },
|
||||
{ "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } },
|
||||
{ "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } },
|
||||
{ "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } },
|
||||
{ "pipeline": { "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } },
|
||||
{ "pipeline": { "name": "zeek.common" } }
|
||||
]
|
||||
"description": "zeek.dns",
|
||||
"processors": [
|
||||
{
|
||||
"set": {
|
||||
"field": "event.dataset",
|
||||
"value": "dns"
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": [
|
||||
"host"
|
||||
],
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"json": {
|
||||
"field": "message",
|
||||
"target_field": "message2",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"dot_expander": {
|
||||
"field": "id.orig_h",
|
||||
"path": "message2",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.proto",
|
||||
"target_field": "network.transport",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.trans_id",
|
||||
"target_field": "dns.id",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.rtt",
|
||||
"target_field": "event.duration",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.query",
|
||||
"target_field": "dns.query.name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.qclass",
|
||||
"target_field": "dns.query.class",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.qclass_name",
|
||||
"target_field": "dns.query.class_name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.qtype",
|
||||
"target_field": "dns.query.type",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.qtype_name",
|
||||
"target_field": "dns.query.type_name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.rcode",
|
||||
"target_field": "dns.response.code",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.rcode_name",
|
||||
"target_field": "dns.response.code_name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.AA",
|
||||
"target_field": "dns.authoritative",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.TC",
|
||||
"target_field": "dns.truncated",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.RD",
|
||||
"target_field": "dns.recursion.desired",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.RA",
|
||||
"target_field": "dns.recursion.available",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.Z",
|
||||
"target_field": "dns.reserved",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.answers",
|
||||
"target_field": "dns.answers.name",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"foreach": {
|
||||
"field": "dns.answers.name",
|
||||
"processor": {
|
||||
"pipeline": {
|
||||
"name": "common.ip_validation"
|
||||
}
|
||||
},
|
||||
"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"foreach": {
|
||||
"field": "temp._valid_ips",
|
||||
"processor": {
|
||||
"append": {
|
||||
"field": "dns.resolved_ip",
|
||||
"allow_duplicates": false,
|
||||
"value": "{{{_ingest._value}}}",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"field": [
|
||||
"temp"
|
||||
],
|
||||
"ignore_missing": true,
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.TTLs",
|
||||
"target_field": "dns.ttls",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.rejected",
|
||||
"target_field": "dns.query.rejected",
|
||||
"ignore_missing": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"lang": "painless",
|
||||
"source": "ctx.dns.query.length = ctx.dns.query.name.length()",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"if": "ctx._index == 'so-zeek'",
|
||||
"field": "_index",
|
||||
"value": "so-zeek_dns",
|
||||
"override": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')",
|
||||
"name": "dns.tld"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "zeek.common"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
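A quick way to sanity-check the zeek.dns pipeline is Elasticsearch's `_ingest/pipeline/_simulate` API. The sketch below is a minimal, hypothetical example: it assumes the pipeline is installed as `zeek.dns`, that the sub-pipelines it calls (`common.ip_validation`, `dns.tld`, `zeek.common`) already exist, and that the endpoint, credentials, and sample values are placeholders.

```python
# Minimal sketch: simulate the zeek.dns ingest pipeline against one sample
# Zeek dns.log record shipped in the "message" field, as the pipeline expects.
import json

import requests

sample = {
    "docs": [
        {
            "_index": "so-zeek",
            "_source": {
                "message": json.dumps({
                    "id.orig_h": "10.0.0.5",
                    "proto": "udp",
                    "trans_id": 4321,
                    "query": "example.com",
                    "qtype_name": "A",
                    "rcode_name": "NOERROR",
                    "answers": ["93.184.216.34"],
                    "TTLs": [300.0],
                })
            },
        }
    ]
}

resp = requests.post(
    "https://localhost:9200/_ingest/pipeline/zeek.dns/_simulate",
    json=sample,
    auth=("elastic", "changeme"),  # placeholder credentials
    verify=False,                  # self-signed cluster cert; lab use only
)
print(json.dumps(resp.json(), indent=2))
```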
@@ -1,20 +0,0 @@
{
  "description" : "zeek.dpd",
  "processors" : [
    { "set": { "field": "event.dataset", "value": "dpd" } },
    { "remove": { "field": ["host"], "ignore_failure": true } },
    { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
    { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } },
    { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } },
    { "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } },
    { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } },
    { "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } },
    { "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } },
    { "rename": { "field": "message2.analyzer", "target_field": "observer.analyzer", "ignore_missing": true } },
    { "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } },
    { "pipeline": { "name": "zeek.common" } }
  ]
}
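Both Zeek pipelines lean on the same `dot_expander` then `rename` pattern: Zeek writes literal dotted keys such as `id.orig_h`, `dot_expander` nests them under `message2`, and `rename` moves the values onto ECS fields. A rough plain-Python model of that flow (the processors themselves run inside Elasticsearch; this sketch only illustrates the transformation):

```python
# Plain-Python model of the dot_expander + rename processor pattern above.
# Zeek emits literal dotted keys; dot_expander nests them, rename maps to ECS.

def dot_expand(doc: dict, field: str) -> None:
    """Turn a literal dotted key like 'id.orig_h' into nested objects."""
    if field in doc:
        value = doc.pop(field)
        node = doc
        *parents, leaf = field.split(".")
        for part in parents:
            node = node.setdefault(part, {})
        node[leaf] = value

record = {"id.orig_h": "10.0.0.5", "id.orig_p": 53133}
for key in ("id.orig_h", "id.orig_p"):
    dot_expand(record, key)

# rename: message2.id.orig_h -> source.ip, message2.id.orig_p -> source.port
ecs = {"source": {"ip": record["id"]["orig_h"], "port": record["id"]["orig_p"]}}
print(ecs)  # {'source': {'ip': '10.0.0.5', 'port': 53133}}
```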
@@ -20,8 +20,28 @@ appender.rolling.strategy.type = DefaultRolloverStrategy
appender.rolling.strategy.action.type = Delete
appender.rolling.strategy.action.basepath = /var/log/elasticsearch
appender.rolling.strategy.action.condition.type = IfFileName
appender.rolling.strategy.action.condition.glob = *.gz
appender.rolling.strategy.action.condition.glob = *.log.gz
appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling.strategy.action.condition.nested_condition.age = 7D

appender.rolling_json.type = RollingFile
appender.rolling_json.name = rolling_json
appender.rolling_json.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.json
appender.rolling_json.layout.type = ECSJsonLayout
appender.rolling_json.layout.dataset = elasticsearch.server
appender.rolling_json.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.json.gz
appender.rolling_json.policies.type = Policies
appender.rolling_json.policies.time.type = TimeBasedTriggeringPolicy
appender.rolling_json.policies.time.interval = 1
appender.rolling_json.policies.time.modulate = true
appender.rolling_json.strategy.type = DefaultRolloverStrategy
appender.rolling_json.strategy.action.type = Delete
appender.rolling_json.strategy.action.basepath = /var/log/elasticsearch
appender.rolling_json.strategy.action.condition.type = IfFileName
appender.rolling_json.strategy.action.condition.glob = *.json.gz
appender.rolling_json.strategy.action.condition.nested_condition.type = IfLastModified
appender.rolling_json.strategy.action.condition.nested_condition.age = 1D

rootLogger.level = info
rootLogger.appenderRef.rolling.ref = rolling
rootLogger.appenderRef.rolling_json.ref = rolling_json
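The Delete actions above prune rolled-over logs by file-name glob plus last-modified age. As a rough model of what the rolling_json retention condition matches (this runs nothing in Log4j; the path, glob, and 1-day age come straight from the config above):

```python
# Illustrative sketch: list the rolled-over JSON logs that the Delete action
# above would remove, i.e. *.json.gz files under the basepath whose mtime is
# older than the nested IfLastModified age (1 day).
import time
from pathlib import Path

BASEPATH = Path("/var/log/elasticsearch")
MAX_AGE_SECONDS = 1 * 24 * 3600  # age = 1D

now = time.time()
for f in sorted(BASEPATH.glob("*.json.gz")):
    if now - f.stat().st_mtime > MAX_AGE_SECONDS:
        print(f"would delete: {f}")
```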
@@ -131,6 +131,47 @@ elasticsearch:
            description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
            global: True
            helpLink: elasticsearch.html
        shrink:
          method:
            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
            options:
              - COUNT
              - SIZE
            global: True
            advanced: True
            forcedType: string
          number_of_shards:
            title: shard count
            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
            global: True
            forcedType: int
            advanced: True
          max_primary_shard_size:
            title: max shard size
            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
            regex: ^[0-9]+(?:gb|tb|pb)$
            global: True
            forcedType: string
            advanced: True
          allow_write_after_shrink:
            description: Allow writes after shrink.
            global: True
            forcedType: bool
            default: False
            advanced: True
        forcemerge:
          max_num_segments:
            description: Reduce the number of segments in each index shard and clean up deleted documents.
            global: True
            forcedType: int
            advanced: True
          index_codec:
            title: compression
            description: Use higher compression for stored fields at the cost of slower performance.
            forcedType: bool
            global: True
            default: False
            advanced: True
      cold:
        min_age:
          description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
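These SOC settings ultimately become actions in an Elasticsearch ILM policy. As a hedged illustration of the end result, the sketch below PUTs a hypothetical policy whose hot phase shrinks by size and force-merges with best_compression; the policy name, sizes, endpoint, and credentials are all placeholders, not values from this commit.

```python
# Hedged sketch: roughly what the shrink/forcemerge settings above translate
# to in an actual ILM policy. All names and values here are placeholders.
import requests

policy = {
    "policy": {
        "phases": {
            "hot": {
                "actions": {
                    "rollover": {"max_primary_shard_size": "50gb"},
                    # method: SIZE -> only max_primary_shard_size is kept
                    "shrink": {"max_primary_shard_size": "25gb"},
                    "forcemerge": {
                        "max_num_segments": 1,
                        "index_codec": "best_compression",  # compression: True
                    },
                }
            }
        }
    }
}

resp = requests.put(
    "https://localhost:9200/_ilm/policy/so-example-policy",
    json=policy,
    auth=("elastic", "changeme"),  # placeholder credentials
    verify=False,                  # lab use only
)
print(resp.status_code, resp.json())
```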
@@ -144,6 +185,12 @@ elasticsearch:
            description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
            global: True
            helpLink: elasticsearch.html
        allocate:
          number_of_replicas:
            description: Set the number of replicas. Remains the same as the previous phase by default.
            forcedType: int
            global: True
            advanced: True
      warm:
        min_age:
          description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
@@ -158,6 +205,52 @@ elasticsearch:
            forcedType: int
            global: True
            helpLink: elasticsearch.html
        shrink:
          method:
            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
            options:
              - COUNT
              - SIZE
            global: True
            advanced: True
          number_of_shards:
            title: shard count
            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
            global: True
            forcedType: int
            advanced: True
          max_primary_shard_size:
            title: max shard size
            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
            regex: ^[0-9]+(?:gb|tb|pb)$
            global: True
            forcedType: string
            advanced: True
          allow_write_after_shrink:
            description: Allow writes after shrink.
            global: True
            forcedType: bool
            default: False
            advanced: True
        forcemerge:
          max_num_segments:
            description: Reduce the number of segments in each index shard and clean up deleted documents.
            global: True
            forcedType: int
            advanced: True
          index_codec:
            title: compression
            description: Use higher compression for stored fields at the cost of slower performance.
            forcedType: bool
            global: True
            default: False
            advanced: True
        allocate:
          number_of_replicas:
            description: Set the number of replicas. Remains the same as the previous phase by default.
            forcedType: int
            global: True
            advanced: True
      delete:
        min_age:
          description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
@@ -287,6 +380,47 @@ elasticsearch:
            global: True
            advanced: True
            helpLink: elasticsearch.html
        shrink:
          method:
            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
            options:
              - COUNT
              - SIZE
            global: True
            advanced: True
            forcedType: string
          number_of_shards:
            title: shard count
            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
            global: True
            forcedType: int
            advanced: True
          max_primary_shard_size:
            title: max shard size
            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
            regex: ^[0-9]+(?:gb|tb|pb)$
            global: True
            forcedType: string
            advanced: True
          allow_write_after_shrink:
            description: Allow writes after shrink.
            global: True
            forcedType: bool
            default: False
            advanced: True
        forcemerge:
          max_num_segments:
            description: Reduce the number of segments in each index shard and clean up deleted documents.
            global: True
            forcedType: int
            advanced: True
          index_codec:
            title: compression
            description: Use higher compression for stored fields at the cost of slower performance.
            forcedType: bool
            global: True
            default: False
            advanced: True
      warm:
        min_age:
          description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier.
@@ -314,6 +448,52 @@ elasticsearch:
            global: True
            advanced: True
            helpLink: elasticsearch.html
        shrink:
          method:
            description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
            options:
              - COUNT
              - SIZE
            global: True
            advanced: True
          number_of_shards:
            title: shard count
            description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'.
            global: True
            forcedType: int
            advanced: True
          max_primary_shard_size:
            title: max shard size
            description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'.
            regex: ^[0-9]+(?:gb|tb|pb)$
            global: True
            forcedType: string
            advanced: True
          allow_write_after_shrink:
            description: Allow writes after shrink.
            global: True
            forcedType: bool
            default: False
            advanced: True
        forcemerge:
          max_num_segments:
            description: Reduce the number of segments in each index shard and clean up deleted documents.
            global: True
            forcedType: int
            advanced: True
          index_codec:
            title: compression
            description: Use higher compression for stored fields at the cost of slower performance.
            forcedType: bool
            global: True
            default: False
            advanced: True
        allocate:
          number_of_replicas:
            description: Set the number of replicas. Remains the same as the previous phase by default.
            forcedType: int
            global: True
            advanced: True
      cold:
        min_age:
          description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier.
@@ -330,6 +510,12 @@ elasticsearch:
            global: True
            advanced: True
            helpLink: elasticsearch.html
        allocate:
          number_of_replicas:
            description: Set the number of replicas. Remains the same as the previous phase by default.
            forcedType: int
            global: True
            advanced: True
      delete:
        min_age:
          description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion.
@@ -392,6 +578,7 @@ elasticsearch:
    so-logs-elastic_agent_x_metricbeat: *indexSettings
    so-logs-elastic_agent_x_osquerybeat: *indexSettings
    so-logs-elastic_agent_x_packetbeat: *indexSettings
    so-logs-elasticsearch_x_server: *indexSettings
    so-metrics-endpoint_x_metadata: *indexSettings
    so-metrics-endpoint_x_metrics: *indexSettings
    so-metrics-endpoint_x_policy: *indexSettings
salt/elasticsearch/ssl.sls (new file, 66 lines)
@@ -0,0 +1,66 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}

# Create a cert for elasticsearch
elasticsearch_key:
  x509.private_key_managed:
    - name: /etc/pki/elasticsearch.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/elasticsearch.key') -%}
    - prereq:
      - x509: /etc/pki/elasticsearch.crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

elasticsearch_crt:
  x509.certificate_managed:
    - name: /etc/pki/elasticsearch.crt
    - ca_server: {{ CA.server }}
    - signing_policy: registry
    - private_key: /etc/pki/elasticsearch.key
    - CN: {{ GLOBALS.hostname }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30
  cmd.run:
    - name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/elasticsearch.key -in /etc/pki/elasticsearch.crt -export -out /etc/pki/elasticsearch.p12 -nodes -passout pass:"
    - onchanges:
      - x509: /etc/pki/elasticsearch.key

elastickeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticsearch.key
    - mode: 640
    - group: 930

elasticp12perms:
  file.managed:
    - replace: False
    - name: /etc/pki/elasticsearch.p12
    - mode: 640
    - group: 930

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
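The `days_remaining: 7` above tells Salt to reissue the certificate once it is within a week of expiry. A small read-only companion sketch to eyeball that window on a node (it assumes the third-party `cryptography` package and does not replace the Salt state):

```python
# Read-only sketch: report how close /etc/pki/elasticsearch.crt is to the
# 7-day renewal window enforced by days_remaining above.
# Requires the third-party "cryptography" package.
from datetime import datetime

from cryptography import x509

with open("/etc/pki/elasticsearch.crt", "rb") as f:
    cert = x509.load_pem_x509_certificate(f.read())

remaining = cert.not_valid_after - datetime.utcnow()  # both naive UTC
print(f"days remaining: {remaining.days}")
if remaining.days <= 7:
    print("inside the renewal window; Salt would reissue this certificate")
```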
@@ -61,5 +61,55 @@
    {% do settings.index_template.template.settings.index.pop('sort') %}
  {% endif %}
{% endif %}

{# advanced ilm actions #}
{% if settings.policy is defined and settings.policy.phases is defined %}
  {% set PHASE_NAMES = ["hot", "warm", "cold"] %}
  {% for P in PHASE_NAMES %}
    {% if settings.policy.phases[P] is defined and settings.policy.phases[P].actions is defined %}
      {% set PHASE = settings.policy.phases[P].actions %}
      {# remove allocate action if number_of_replicas isn't configured #}
      {% if PHASE.allocate is defined %}
        {% if PHASE.allocate.number_of_replicas is not defined or PHASE.allocate.number_of_replicas == "" %}
          {% do PHASE.pop('allocate', none) %}
        {% endif %}
      {% endif %}
      {# start shrink action #}
      {% if PHASE.shrink is defined %}
        {% if PHASE.shrink.method is defined %}
          {% if PHASE.shrink.method == 'COUNT' and PHASE.shrink.number_of_shards is defined and PHASE.shrink.number_of_shards %}
            {# remove max_primary_shard_size value when doing shrink operation by count vs size #}
            {% do PHASE.shrink.pop('max_primary_shard_size', none) %}
          {% elif PHASE.shrink.method == 'SIZE' and PHASE.shrink.max_primary_shard_size is defined and PHASE.shrink.max_primary_shard_size %}
            {# remove number_of_shards value when doing shrink operation by size vs count #}
            {% do PHASE.shrink.pop('number_of_shards', none) %}
          {% else %}
            {# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #}
            {% do PHASE.pop('shrink', none) %}
          {% endif %}
        {% endif %}
      {% endif %}
      {# always remove shrink method since its only used for SOC config, not in the actual ilm policy #}
      {% if PHASE.shrink is defined %}
        {% do PHASE.shrink.pop('method', none) %}
      {% endif %}
      {# end shrink action #}
      {# start force merge #}
      {% if PHASE.forcemerge is defined %}
        {% if PHASE.forcemerge.index_codec is defined and PHASE.forcemerge.index_codec %}
          {% do PHASE.forcemerge.update({'index_codec': 'best_compression'}) %}
        {% else %}
          {% do PHASE.forcemerge.pop('index_codec', none) %}
        {% endif %}
        {% if PHASE.forcemerge.max_num_segments is not defined or not PHASE.forcemerge.max_num_segments %}
          {# max_num_segments is empty, drop it #}
          {% do PHASE.pop('forcemerge', none) %}
        {% endif %}
      {% endif %}
      {# end force merge #}
    {% endif %}
  {% endfor %}
{% endif %}

{% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %}
{% endfor %}
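For readers who don't think in Jinja, here is the same per-phase normalization expressed as a hypothetical Python function. It is a sketch of the logic above, not code that exists in this repo; `phase` stands in for one ILM actions dict from the SOC config.

```python
# Hedged sketch: Python equivalent of the Jinja normalization above.
def normalize_actions(phase: dict) -> dict:
    # Drop allocate unless number_of_replicas is actually configured.
    allocate = phase.get("allocate") or {}
    if "allocate" in phase and not allocate.get("number_of_replicas"):
        phase.pop("allocate", None)

    # Shrink: keep only the field that matches the selected method.
    shrink = phase.get("shrink")
    if shrink and shrink.get("method") is not None:
        method = shrink["method"]
        if method == "COUNT" and shrink.get("number_of_shards"):
            shrink.pop("max_primary_shard_size", None)
        elif method == "SIZE" and shrink.get("max_primary_shard_size"):
            shrink.pop("number_of_shards", None)
        else:
            # method missing its required companion setting: drop the action
            phase.pop("shrink", None)
    # "method" is SOC-only and never belongs in the real ILM policy.
    if "shrink" in phase:
        phase["shrink"].pop("method", None)

    # Forcemerge: map the boolean index_codec to best_compression, and drop
    # the whole action when max_num_segments is missing or empty.
    merge = phase.get("forcemerge")
    if merge is not None:
        if merge.get("index_codec"):
            merge["index_codec"] = "best_compression"
        else:
            merge.pop("index_codec", None)
        if not merge.get("max_num_segments"):
            phase.pop("forcemerge", None)
    return phase

# Example: shrink by SIZE wins, so the shard count is discarded.
print(normalize_actions({
    "shrink": {"method": "SIZE", "number_of_shards": 1,
               "max_primary_shard_size": "50gb"},
    "forcemerge": {"max_num_segments": 1, "index_codec": True},
}))
```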
@@ -841,6 +841,10 @@
          "type": "long"
        }
      }
    },
    "capture_file": {
      "type": "keyword",
      "ignore_above": 1024
    }
  }
}
@@ -0,0 +1,43 @@
{
  "template": {
    "mappings": {
      "properties": {
        "agent": {
          "type": "object",
          "properties": {
            "hostname": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "id": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "last_checkin_status": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "last_checkin": {
              "type": "date"
            },
            "name": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "offline_duration_hours": {
              "type": "integer"
            },
            "policy_id": {
              "ignore_above": 1024,
              "type": "keyword"
            },
            "status": {
              "ignore_above": 1024,
              "type": "keyword"
            }
          }
        }
      }
    }
  }
}
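The mapping types `offline_duration_hours` as an integer alongside the `last_checkin` date, which suggests the hour count is derived before indexing. A hypothetical sketch of that derivation (only the field names come from the mapping above; the input shape and the derivation itself are assumptions):

```python
# Hedged sketch: derive agent.offline_duration_hours from agent.last_checkin
# before indexing a document against the mapping above.
from datetime import datetime, timezone

def with_offline_hours(agent: dict) -> dict:
    last = datetime.fromisoformat(agent["last_checkin"].replace("Z", "+00:00"))
    delta = datetime.now(timezone.utc) - last
    agent["offline_duration_hours"] = max(0, int(delta.total_seconds() // 3600))
    return agent

print(with_offline_hours({
    "id": "a1b2c3", "status": "offline",
    "last_checkin": "2024-01-01T00:00:00Z",
}))
```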
Some files were not shown because too many files have changed in this diff.