Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2025-12-06 17:22:49 +01:00

Comparing 2.3.60FBPI...2.3.80: 639 commits
@@ -1,6 +1,6 @@
-## Security Onion 2.3.60
+## Security Onion 2.3.80

-Security Onion 2.3.60 is here!
+Security Onion 2.3.80 is here!

 ## Screenshots
@@ -1,18 +1,18 @@
-### 2.3.60-FBPIPELINE ISO image built on 2021/07/13
+### 2.3.80 ISO image built on 2021/09/27


 ### Download and Verify

-2.3.60-FBPIPELINE ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.3.60-FBPIPELINE.iso
+2.3.80 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso

-MD5: 2EA2B337289D0CFF0C7488E8E88FE7BE
-SHA1: 7C22F16AD395E079F4C5345093AF26C105E36D4C
-SHA256: 3B685BBD19711229C5FCD5D254BA5024AF0C36A3E379790B5E83037CE2668724
+MD5: 24F38563860416F4A8ABE18746913E14
+SHA1: F923C005F54EA2A17AB225ADA0DA46042707AAD9
+SHA256: 8E95D10AF664D9A406C168EC421D943CB23F0D0C1813C6C2DBA9B4E131984018

 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.60-FBPIPELINE.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig

 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS

@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma

 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.60-FBPIPELINE.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig
 ```

 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.3.60-FBPIPELINE.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso
 ```

 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.3.60-FBPIPELINE.iso.sig securityonion-2.3.60-FBPIPELINE.iso
+gpg --verify securityonion-2.3.80.iso.sig securityonion-2.3.80.iso
 ```

 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Tue 13 Jul 2021 04:12:08 PM EDT using RSA key ID FE507013
+gpg: Signature made Mon 27 Sep 2021 08:55:01 AM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
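Alongside the GPG signature check, the published SHA256 value can be verified against a locally computed digest; a minimal sketch, assuming the ISO was downloaded to the current directory:

```
# 'sha256sum -c' recomputes the digest and prints OK when it matches the
# published value (note the two spaces between hash and filename).
echo "8E95D10AF664D9A406C168EC421D943CB23F0D0C1813C6C2DBA9B4E131984018  securityonion-2.3.80.iso" | sha256sum -c -
```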
@@ -1,7 +1,7 @@
 elasticsearch:
   templates:
   - so/so-beats-template.json.jinja
-  - so/so-common-template.json
+  - so/so-common-template.json.jinja
   - so/so-firewall-template.json.jinja
   - so/so-flow-template.json.jinja
   - so/so-ids-template.json.jinja
@@ -10,4 +10,4 @@ elasticsearch:
   - so/so-ossec-template.json.jinja
   - so/so-strelka-template.json.jinja
   - so/so-syslog-template.json.jinja
   - so/so-zeek-template.json.jinja

@@ -1,7 +1,7 @@
 elasticsearch:
   templates:
   - so/so-beats-template.json.jinja
-  - so/so-common-template.json
+  - so/so-common-template.json.jinja
   - so/so-firewall-template.json.jinja
   - so/so-flow-template.json.jinja
   - so/so-ids-template.json.jinja

@@ -1,7 +1,7 @@
 elasticsearch:
   templates:
   - so/so-beats-template.json.jinja
-  - so/so-common-template.json
+  - so/so-common-template.json.jinja
   - so/so-firewall-template.json.jinja
   - so/so-flow-template.json.jinja
   - so/so-ids-template.json.jinja

@@ -13,3 +13,4 @@ logstash:
   - so/9500_output_beats.conf.jinja
   - so/9600_output_ossec.conf.jinja
   - so/9700_output_strelka.conf.jinja
+  - so/9800_output_logscan.conf.jinja
@@ -45,7 +45,8 @@
     'schedule',
     'soctopus',
     'tcpreplay',
-    'docker_clean'
+    'docker_clean',
+    'learn'
   ],
   'so-heavynode': [
     'ca',
@@ -108,7 +109,8 @@
     'zeek',
     'schedule',
     'tcpreplay',
-    'docker_clean'
+    'docker_clean',
+    'learn'
   ],
   'so-manager': [
     'salt.master',
@@ -127,7 +129,8 @@
     'utility',
     'schedule',
     'soctopus',
-    'docker_clean'
+    'docker_clean',
+    'learn'
   ],
   'so-managersearch': [
     'salt.master',
@@ -146,7 +149,8 @@
     'utility',
     'schedule',
     'soctopus',
-    'docker_clean'
+    'docker_clean',
+    'learn'
   ],
   'so-node': [
     'ca',
@@ -178,7 +182,8 @@
     'schedule',
     'soctopus',
     'tcpreplay',
-    'docker_clean'
+    'docker_clean',
+    'learn'
   ],
   'so-sensor': [
     'ca',
@@ -237,7 +242,7 @@
 {% do allowed_states.append('kibana') %}
 {% endif %}

-{% if CURATOR and grains.role in ['so-eval', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode'] %}
+{% if grains.role in ['so-eval', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
 {% do allowed_states.append('curator') %}
 {% endif %}

@@ -296,4 +301,4 @@
 {% endif %}

 {# all nodes can always run salt.minion state #}
 {% do allowed_states.append('salt.minion') %}
@@ -22,6 +22,7 @@
 /opt/so/log/salt/so-salt-minion-check
 /opt/so/log/salt/minion
 /opt/so/log/salt/master
+/opt/so/log/logscan/*.log
 {
 {{ logrotate_conf | indent(width=4) }}
 }
@@ -326,6 +326,16 @@ dockerreserveports:
     - name: /etc/sysctl.d/99-reserved-ports.conf

 {% if salt['grains.get']('sosmodel', '') %}
+{% if grains['os'] == 'CentOS' %}
+# Install Raid tools
+raidpkgs:
+  pkg.installed:
+    - skip_suggestions: True
+    - pkgs:
+      - securityonion-raidtools
+      - securityonion-megactl
+{% endif %}
+
 # Install raid check cron
 /usr/sbin/so-raid-status > /dev/null 2>&1:
   cron.present:
@@ -17,4 +17,4 @@

 . /usr/sbin/so-common

-salt-call state.highstate -linfo
+salt-call state.highstate -l info
@@ -99,6 +99,15 @@ check_password() {
   return $?
 }

+check_password_and_exit() {
+  local password=$1
+  if ! check_password "$password"; then
+    echo "Password is invalid. Do not include single quotes, double quotes, dollar signs, and backslashes in the password."
+    exit 2
+  fi
+  return 0
+}
+
 check_elastic_license() {

   [ -n "$TESTING" ] && return
@@ -372,6 +381,14 @@ set_version() {
   fi
 }

+has_uppercase() {
+  local string=$1
+
+  echo "$string" | grep -qP '[A-Z]' \
+    && return 0 \
+    || return 1
+}
+
 valid_cidr() {
   # Verify there is a backslash in the string
   echo "$1" | grep -qP "^[^/]+/[^/]+$" || return 1
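For context, a minimal sketch of how a caller uses the new helper (the variable name here is illustrative; the so-fleet password diff further down shows the real call site):

```
. /usr/sbin/so-common

# Read a candidate password; check_password_and_exit terminates the script
# with exit code 2 if it contains characters check_password rejects
# (quotes, dollar signs, backslashes).
read -rs NEW_PASS
check_password_and_exit "$NEW_PASS"
echo "Password accepted."
```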
salt/common/tools/sbin/so-elasticsearch-roles-load (new file, 57 lines)

#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{%- set mainint = salt['pillar.get']('host:mainint') %}
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}

default_conf_dir=/opt/so/conf
ELASTICSEARCH_HOST="{{ MYIP }}"
ELASTICSEARCH_PORT=9200

# Define a default directory to load roles from
ELASTICSEARCH_ROLES="$default_conf_dir/elasticsearch/roles/"

# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
  {{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
  if [ $? -eq 0 ]; then
    ELASTICSEARCH_CONNECTED="yes"
    echo "connected!"
    break
  else
    ((COUNT+=1))
    sleep 1
    echo -n "."
  fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
  echo
  echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
  echo
fi

cd ${ELASTICSEARCH_ROLES}

echo "Loading templates..."
for role in *; do
  name=$(echo "$role" | cut -d. -f1)
  so-elasticsearch-query _security/role/$name -XPUT -d @"$role"
done

cd - >/dev/null
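Each file in the roles directory becomes an Elasticsearch role named after the part of the filename before the first dot. A hedged sketch of loading a single role by hand the same way the loop above does; the role file name here is hypothetical:

```
# Hypothetical role file; any JSON role definition dropped into the default
# directory would be loaded the same way by so-elasticsearch-roles-load.
cd /opt/so/conf/elasticsearch/roles
so-elasticsearch-query _security/role/so-analyst -XPUT -d @so-analyst.json
cd - >/dev/null
```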
@@ -35,6 +35,7 @@ def showUsage(options, args):
   print('')
   print(' General commands:')
   print('   help           - Prints this usage information.')
+  print('   apply          - Apply the firewall state.')
   print('')
   print(' Host commands:')
   print('   listhostgroups - Lists the known host groups.')
@@ -66,7 +67,7 @@ def checkDefaultPortsOption(options):

 def checkApplyOption(options):
   if "--apply" in options:
-    return apply()
+    return apply(None, None)

 def loadYaml(filename):
   file = open(filename, "r")
@@ -328,7 +329,7 @@ def removehost(options, args):
     code = checkApplyOption(options)
     return code

-def apply():
+def apply(options, args):
   proc = subprocess.run(['salt-call', 'state.apply', 'firewall', 'queue=True'])
   return proc.returncode

@@ -356,7 +357,8 @@ def main():
     "addport": addport,
     "removeport": removeport,
     "addhostgroup": addhostgroup,
-    "addportgroup": addportgroup
+    "addportgroup": addportgroup,
+    "apply": apply
   }

   code=1
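The new `apply` subcommand exposes the existing salt-call wrapper directly on the command line; a minimal usage sketch:

```
# Apply the firewall Salt state on demand, without first changing a rule.
# Equivalent to: salt-call state.apply firewall queue=True
so-firewall apply
```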
@@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then
 fi
 read -rs FLEET_PASS

-if ! check_password "$FLEET_PASS"; then
-  echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
-  exit 2
-fi
+check_password_and_exit "$FLEET_PASS"

 FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
 if [[ $? -ne 0 ]]; then
salt/common/tools/sbin/so-fleet-user-update (new executable file, 75 lines)

#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

usage() {
  echo "Usage: $0 <user-name>"
  echo ""
  echo "Update password for an existing Fleet user. The new password will be read from STDIN."
  exit 1
}

if [ $# -ne 1 ]; then
  usage
fi

USER=$1

MYSQL_PASS=$(lookup_pillar_secret mysql)
FLEET_IP=$(lookup_pillar fleet_ip)
FLEET_USER=$USER

# test existence of user
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
  "SELECT count(1) FROM users WHERE username='$FLEET_USER'" 2>/dev/null | tail -1)
if [[ $? -ne 0 ]] || [[ $MYSQL_OUTPUT -ne 1 ]] ; then
  echo "Test for username [${FLEET_USER}] failed"
  echo "  expect 1 hit in users database, return $MYSQL_OUTPUT hit(s)."
  echo "Unable to update Fleet user password."
  exit 2
fi

# Read password for new user from stdin
test -t 0
if [[ $? == 0 ]]; then
  echo "Enter new password:"
fi
read -rs FLEET_PASS

if ! check_password "$FLEET_PASS"; then
  echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
  exit 2
fi

FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
if [[ $? -ne 0 ]]; then
  echo "Failed to generate Fleet password hash"
  exit 2
fi

MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
  "UPDATE users SET password='$FLEET_HASH', salt='' where username='$FLEET_USER'" 2>&1)

if [[ $? -eq 0 ]]; then
  echo "Successfully updated Fleet user password"
else
  echo "Unable to update Fleet user password"
  echo "$MYSQL_OUTPUT"
  exit 2
fi
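A minimal usage sketch for the new script (the user name is illustrative); the password is read from stdin, so it can be entered interactively or piped in:

```
# Interactive: prompts "Enter new password:" when run from a terminal.
sudo so-fleet-user-update jdoe

# Non-interactive: supply the password on stdin (subject to the same
# check_password restrictions on quotes, dollar signs, and backslashes).
echo 'NewFleetPassw0rd' | sudo so-fleet-user-update jdoe
```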
salt/common/tools/sbin/so-grafana-dashboard-folder-delete (new executable file, 17 lines)

# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70

folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")

for row in $folders; do
  title=$(echo ${row} | base64 --decode | jq -r '.title')
  uid=$(echo ${row} | base64 --decode | jq -r '.uid')

  if [[ " ${delfolder[@]} " =~ " ${title} " ]]; then
    curl -X DELETE http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders/$uid
  fi
done

echo "so-grafana-dashboard-folder-delete has been run to delete default Grafana dashboard folders that existed prior to 2.3.70" > /opt/so/state/so-grafana-dashboard-folder-delete-complete

exit 0
@@ -17,6 +17,7 @@

 # NOTE: This script depends on so-common
 IMAGEREPO=security-onion-solutions
+STATUS_CONF='/opt/so/conf/so-status/so-status.conf'

 # shellcheck disable=SC2120
 container_list() {
@@ -137,6 +138,11 @@ update_docker_containers() {
   if [[ $result -eq 0 ]]; then
     cat $SIGNPATH/KEYS | gpg --import - >> "$LOG_FILE" 2>&1
   fi

+  # If downloading for soup, check if any optional images need to be pulled
+  if [[ $CURLTYPE == 'soup' ]]; then
+    grep -q "so-logscan" "$STATUS_CONF" && TRUSTED_CONTAINERS+=("so-logscan")
+  fi
+
   # Download the containers from the interwebs
   for i in "${TRUSTED_CONTAINERS[@]}"
salt/common/tools/sbin/so-image-pull (new executable file, 58 lines)

#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common
. /usr/sbin/so-image-common

usage() {
  read -r -d '' message <<- EOM
	usage: so-image-pull [-h] IMAGE [IMAGE ...]

	positional arguments:
	  IMAGE        One or more 'so-' prefixed images to download and verify.

	optional arguments:
	  -h, --help   Show this help message and exit.
	EOM
  echo "$message"
  exit 1
}

for arg; do
  shift
  [[ "$arg" = "--quiet" || "$arg" = "-q" ]] && quiet=true && continue
  set -- "$@" "$arg"
done

if [[ $# -eq 0 || $# -gt 1 ]] || [[ $1 == '-h' || $1 == '--help' ]]; then
  usage
fi

TRUSTED_CONTAINERS=("$@")
set_version

for image in "${TRUSTED_CONTAINERS[@]}"; do
  if ! docker images | grep "$image" | grep ":5000" | grep -q "$VERSION"; then
    if [[ $quiet == true ]]; then
      update_docker_containers "$image" "" "" "/dev/null"
    else
      update_docker_containers "$image" "" "" ""
    fi
  else
    echo "$image:$VERSION image exists."
  fi
done
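A minimal usage sketch; so-learn (further down) invokes this script with --quiet when it pulls module images programmatically:

```
# Download and verify a single 'so-' prefixed image, skipping the pull if
# the image already exists in the local :5000 registry at $VERSION.
sudo so-image-pull so-logscan

# Same, but with download output suppressed.
sudo so-image-pull --quiet so-logscan
```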
salt/common/tools/sbin/so-import-evtx (new file, 172 lines)

#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%}
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{% set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{% set ES_PW = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

INDEX_DATE=$(date +'%Y.%m.%d')
RUNID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1)

. /usr/sbin/so-common

function usage {
  cat << EOF
Usage: $0 <evtx-file-1> [evtx-file-2] [evtx-file-*]

Imports one or more evtx files into Security Onion. The evtx files will be analyzed and made available for review in the Security Onion toolset.
EOF
}

function evtx2es() {
  EVTX=$1
  HASH=$2

  docker run --rm \
    -v "$EVTX:/tmp/$RUNID.evtx" \
    --entrypoint evtx2es \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \
    --host {{ MANAGERIP }} --scheme https \
    --index so-beats-$INDEX_DATE --pipeline import.wel \
    --login {{ES_USER}} --pwd {{ES_PW}} \
    "/tmp/$RUNID.evtx" 1>/dev/null 2>/dev/null

  docker run --rm \
    -v "$EVTX:/tmp/import.evtx" \
    -v "/nsm/import/evtx-end_newest:/tmp/newest" \
    -v "/nsm/import/evtx-start_oldest:/tmp/oldest" \
    --entrypoint '/evtx_calc_timestamps.sh' \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}
}

# if no parameters supplied, display usage
if [ $# -eq 0 ]; then
  usage
  exit 1
fi

# ensure this is a Manager node
require_manager

# verify that all parameters are files
for i in "$@"; do
  if ! [ -f "$i" ]; then
    usage
    echo "\"$i\" is not a valid file!"
    exit 2
  fi
done

# track if we have any valid or invalid evtx
INVALID_EVTXS="no"
VALID_EVTXS="no"

# track oldest start and newest end so that we can generate the Kibana search hyperlink at the end
START_OLDEST="2050-12-31"
END_NEWEST="1971-01-01"

touch /nsm/import/evtx-start_oldest
touch /nsm/import/evtx-end_newest

echo $START_OLDEST > /nsm/import/evtx-start_oldest
echo $END_NEWEST > /nsm/import/evtx-end_newest

# paths must be quoted in case they include spaces
for EVTX in "$@"; do
  EVTX=$(/usr/bin/realpath "$EVTX")
  echo "Processing Import: ${EVTX}"

  # generate a unique hash to assist with dedupe checks
  HASH=$(md5sum "${EVTX}" | awk '{ print $1 }')
  HASH_DIR=/nsm/import/${HASH}
  echo "- assigning unique identifier to import: $HASH"

  if [ -d $HASH_DIR ]; then
    echo "- this EVTX has already been imported; skipping"
    INVALID_EVTXS="yes"
  else
    VALID_EVTXS="yes"

    EVTX_DIR=$HASH_DIR/evtx
    mkdir -p $EVTX_DIR

    # import evtx and write them to import ingest pipeline
    echo "- importing logs to Elasticsearch..."
    evtx2es "${EVTX}" $HASH

    # compare $START to $START_OLDEST
    START=$(cat /nsm/import/evtx-start_oldest)
    START_COMPARE=$(date -d $START +%s)
    START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s)
    if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then
      START_OLDEST=$START
    fi

    # compare $ENDNEXT to $END_NEWEST
    END=$(cat /nsm/import/evtx-end_newest)
    ENDNEXT=`date +%Y-%m-%d --date="$END 1 day"`
    ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s)
    END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s)
    if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then
      END_NEWEST=$ENDNEXT
    fi

    cp -f "${EVTX}" "${EVTX_DIR}"/data.evtx
    chmod 644 "${EVTX_DIR}"/data.evtx

  fi # end of valid evtx

  echo

done # end of for-loop processing evtx files

# remove temp files
echo "Cleaning up:"
for TEMP_EVTX in ${TEMP_EVTXS[@]}; do
  echo "- removing temporary evtx $TEMP_EVTX"
  rm -f $TEMP_EVTX
done

# output final messages
if [ "$INVALID_EVTXS" = "yes" ]; then
  echo
  echo "Please note! One or more evtx was invalid! You can scroll up to see which ones were invalid."
fi

START_OLDEST_FORMATTED=`date +%Y-%m-%d --date="$START_OLDEST"`
START_OLDEST_SLASH=$(echo $START_OLDEST_FORMATTED | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')

if [ "$VALID_EVTXS" = "yes" ]; then
  cat << EOF

Import complete!

You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser:
https://{{ URLBASE }}/#/hunt?q=import.id:${RUNID}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC

or you can manually set your Time Range to be (in UTC):
From: $START_OLDEST_FORMATTED To: $END_NEWEST

Please note that it may take 30 seconds or more for events to appear in Hunt.
EOF
fi
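A minimal usage sketch (paths are illustrative); the script must run on a manager node and skips any file whose MD5 hash has already been imported:

```
# Import two Windows event log exports; each file is copied under
# /nsm/import/<md5>/evtx/ and indexed through the import.wel pipeline.
sudo so-import-evtx /cases/acme/Security.evtx /cases/acme/System.evtx
```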
salt/common/tools/sbin/so-influxdb-drop-autogen (mode changed: Normal file → Executable file)

salt/common/tools/sbin/so-learn (new executable file, 303 lines)

#!/usr/bin/env python3

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from itertools import chain
from typing import List

import signal
import sys
import os
import re
import subprocess
import argparse
import textwrap
import yaml
import multiprocessing
import docker
import pty

minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
so_status_conf = '/opt/so/conf/so-status/so-status.conf'
proc: subprocess.CompletedProcess = None

# Temp store of modules, will likely be broken out into salt
def get_learn_modules():
    return {
        'logscan': { 'cpu_period': get_cpu_period(fraction=0.25), 'enabled': False, 'description': 'Scan log files against pre-trained models to alert on anomalies.' }
    }


def get_cpu_period(fraction: float):
    multiplier = 10000

    num_cores = multiprocessing.cpu_count()
    if num_cores <= 2:
        fraction = 1.

    num_used_cores = int(num_cores * fraction)
    cpu_period = num_used_cores * multiplier
    return cpu_period


def sigint_handler(*_):
    print('Exiting gracefully on Ctrl-C')
    if proc is not None: proc.send_signal(signal.SIGINT)
    sys.exit(1)


def find_minion_pillar() -> str:
    regex = '^.*_(manager|managersearch|standalone|import|eval)\.sls$'

    result = []
    for root, _, files in os.walk(minion_pillar_dir):
        for f_minion_id in files:
            if re.search(regex, f_minion_id):
                result.append(os.path.join(root, f_minion_id))

    if len(result) == 0:
        print('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?', file=sys.stderr)
        sys.exit(3)
    elif len(result) > 1:
        res_str = ', '.join(f'\"{result}\"')
        print('(This should not happen, the system is in an error state if you see this message.)\n', file=sys.stderr)
        print('More than one manager-type pillar exists, minion id\'s listed below:', file=sys.stderr)
        print(f'  {res_str}', file=sys.stderr)
        sys.exit(3)
    else:
        return result[0]


def read_pillar(pillar: str):
    try:
        with open(pillar, 'r') as pillar_file:
            loaded_yaml = yaml.safe_load(pillar_file.read())
            if loaded_yaml is None:
                print(f'Could not parse {pillar}', file=sys.stderr)
                sys.exit(3)
            return loaded_yaml
    except:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)


def write_pillar(pillar: str, content: dict):
    try:
        with open(pillar, 'w') as pillar_file:
            yaml.dump(content, pillar_file, default_flow_style=False)
    except:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)


def mod_so_status(action: str, item: str):
    with open(so_status_conf, 'a+') as conf:
        conf.seek(0)
        containers = conf.readlines()

        if f'so-{item}\n' in containers:
            if action == 'remove': containers.remove(f'so-{item}\n')
            if action == 'add': pass
        else:
            if action == 'remove': pass
            if action == 'add': containers.append(f'so-{item}\n')

        [containers.remove(c_name) for c_name in containers if c_name == '\n'] # remove extra newlines

        conf.seek(0)
        conf.truncate(0)
        conf.writelines(containers)


def create_pillar_if_not_exist(pillar: str, content: dict):
    pillar_dict = content

    if pillar_dict.get('learn', {}).get('modules') is None:
        pillar_dict['learn'] = {}
        pillar_dict['learn']['modules'] = get_learn_modules()
        content.update()
        write_pillar(pillar, content)

    return content


def salt_call(module: str):
    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', f'learn.{module}', 'queue=True']

    print(f'  Applying salt state for {module} module...')
    proc = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return_code = proc.returncode
    if return_code != 0:
        print(f'  [ERROR] Failed to apply salt state for {module} module.')

    return return_code


def pull_image(module: str):
    container_basename = f'so-{module}'

    client = docker.from_env()
    image_list = client.images.list(filters={ 'dangling': False })
    tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
    basename_match = list(filter(lambda x: f'{container_basename}' in x, tag_list))
    local_registry_match = list(filter(lambda x: ':5000' in x, basename_match))

    if len(local_registry_match) == 0:
        print(f'Pulling and verifying missing image for {module} (may take several minutes) ...')
        pull_command = ['so-image-pull', '--quiet', container_basename]

        proc = subprocess.run(pull_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return_code = proc.returncode
        if return_code != 0:
            print(f'[ERROR] Failed to pull image so-{module}, skipping state.')
    else:
        return_code = 0
    return return_code


def apply(module_list: List):
    return_code = 0
    for module in module_list:
        salt_ret = salt_call(module)
        # Only update return_code if the command returned a non-zero return
        if salt_ret != 0:
            return_code = salt_ret

    return return_code


def check_apply(args: dict):
    if args.apply:
        print('Configuration updated. Applying changes:')
        return apply(args.modules)
    else:
        message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
        answer = input(message)
        while answer.lower() not in [ 'y', 'n', '' ]:
            answer = input(message)
        if answer.lower() in [ 'n', '' ]:
            return 0
        else:
            print('Applying changes:')
            return apply(args.modules)


def enable_disable_modules(args, enable: bool):
    pillar_modules = args.pillar_dict.get('learn', {}).get('modules')
    pillar_mod_names = args.pillar_dict.get('learn', {}).get('modules').keys()

    action_str = 'add' if enable else 'remove'

    if 'all' in args.modules:
        for module, details in pillar_modules.items():
            details['enabled'] = enable
            mod_so_status(action_str, module)
            if enable: pull_image(module)
        args.pillar_dict.update()
        write_pillar(args.pillar, args.pillar_dict)
    else:
        write_needed = False
        for module in args.modules:
            if module in pillar_mod_names:
                if pillar_modules[module]['enabled'] == enable:
                    state_str = 'enabled' if enable else 'disabled'
                    print(f'{module} module already {state_str}.', file=sys.stderr)
                else:
                    if enable and pull_image(module) != 0:
                        continue
                    pillar_modules[module]['enabled'] = enable
                    mod_so_status(action_str, module)
                    write_needed = True
        if write_needed:
            args.pillar_dict.update()
            write_pillar(args.pillar, args.pillar_dict)

    cmd_ret = check_apply(args)
    return cmd_ret


def enable_modules(args):
    enable_disable_modules(args, enable=True)


def disable_modules(args):
    enable_disable_modules(args, enable=False)


def list_modules(*_):
    print('Available ML modules:')
    for module, details in get_learn_modules().items():
        print(f'  - { module } : {details["description"]}')
    return 0


def main():
    beta_str = 'BETA - SUBJECT TO CHANGE\n'

    apply_help = 'After ACTION the chosen modules, apply any necessary salt states.'
    enable_apply_help = apply_help.replace('ACTION', 'enabling')
    disable_apply_help = apply_help.replace('ACTION', 'disabling')

    signal.signal(signal.SIGINT, sigint_handler)

    if os.geteuid() != 0:
        print('You must run this script as root', file=sys.stderr)
        sys.exit(1)

    main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)

    subcommand_desc = textwrap.dedent(
        """\
        enable      Enable one or more ML modules.
        disable     Disable one or more ML modules.
        list        List all available ML modules.
        """
    )

    subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')

    module_help_str = 'One or more ML modules, which can be listed using \'so-learn list\'. Use the keyword \'all\' to apply the action to all available modules.'

    enable = subparsers.add_parser('enable')
    enable.set_defaults(func=enable_modules)
    enable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    enable.add_argument('--apply', action='store_const', const=True, required=False, help=enable_apply_help)

    disable = subparsers.add_parser('disable')
    disable.set_defaults(func=disable_modules)
    disable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    disable.add_argument('--apply', action='store_const', const=True, required=False, help=disable_apply_help)

    list = subparsers.add_parser('list')
    list.set_defaults(func=list_modules)

    args = main_parser.parse_args(sys.argv[1:])
    args.pillar = find_minion_pillar()
    args.pillar_dict = create_pillar_if_not_exist(args.pillar, read_pillar(args.pillar))

    if hasattr(args, 'func'):
        exit_code = args.func(args)
    else:
        if args.command is None:
            print(beta_str)
            main_parser.print_help()
            sys.exit(0)

    sys.exit(exit_code)


if __name__ == '__main__':
    main()
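A minimal usage sketch for the new module manager:

```
# List the available ML modules (currently just logscan).
sudo so-learn list

# Enable logscan, pulling the so-logscan image if needed, and apply the
# learn.logscan Salt state without the interactive confirmation prompt.
sudo so-learn enable logscan --apply
```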
22
salt/common/tools/sbin/so-playbook-import
Normal file
22
salt/common/tools/sbin/so-playbook-import
Normal file
@@ -0,0 +1,22 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

ENABLEPLAY=${1:-False}

docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))"
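A quick usage sketch: the optional first argument is passed straight through as a Python boolean literal into the SOCtopus container, so passing True enables the imported plays, while the default is False:

    sudo so-playbook-import True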
@@ -17,53 +17,101 @@
 . /usr/sbin/so-common
 
-check_lsi_raid() {
-  # For use for LSI on Ubuntu
-  #MEGA=/opt/MegaRAID/MegeCli/MegaCli64
-  #LSIRC=$($MEGA -LDInfo -Lall -aALL | grep Optimal)
-  # Open Source Centos
-  MEGA=/opt/mega/megasasctl
-  LSIRC=$($MEGA | grep optimal)
-
-  if [[ $LSIRC ]]; then
-    # Raid is good
-    LSIRAID=0
+appliance_check() {
+  {%- if salt['grains.get']('sosmodel', '') %}
+  APPLIANCE=1
+  {%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %}
+  exit 0
+  {%- endif %}
+  DUDEYOUGOTADELL=$(dmidecode |grep Dell)
+  if [[ -n $DUDEYOUGOTADELL ]]; then
+    APPTYPE=dell
+  else
+    APPTYPE=sm
+  fi
+  mkdir -p /opt/so/log/raid
+
+  {%- else %}
+  echo "This is not an appliance"
+  exit 0
+  {%- endif %}
+}
+
+check_nsm_raid() {
+  PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
+  MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)
+
+  if [[ $APPLIANCE == '1' ]]; then
+    if [[ -n $PERCCLI ]]; then
+      HWRAID=0
+    elif [[ -n $MEGACTL ]]; then
+      HWRAID=0
     else
-    LSIRAID=1
+      HWRAID=1
     fi
+  fi
+}
+
+check_boss_raid() {
+  MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)
+
+  if [[ -n $DUDEYOUGOTADELL ]]; then
+    if [[ -n $MVCLI ]]; then
+      BOSSRAID=0
+    else
+      BOSSRAID=1
+    fi
+  fi
 }
 
 check_software_raid() {
+  if [[ -n $DUDEYOUGOTADELL ]]; then
   SWRC=$(grep "_" /proc/mdstat)
 
-  if [[ $SWRC ]]; then
+  if [[ -n $SWRC ]]; then
     # RAID is failed in some way
     SWRAID=1
   else
     SWRAID=0
   fi
+  fi
 }
 
 # This script checks raid status if you use SO appliances
 
 # See if this is an appliance
 
+appliance_check
+check_nsm_raid
+check_boss_raid
 {%- if salt['grains.get']('sosmodel', '') %}
-mkdir -p /opt/so/log/raid
-{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
-#check_boss_raid
+{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
 check_software_raid
-echo "nsmraid=$SWRAID" > /opt/so/log/raid/status.log
-{%- elif grains['sosmodel'] in ['SOS1000F', 'SOS1000', 'SOSSN7200', 'SOS10K', 'SOS4000'] %}
-#check_boss_raid
-check_lsi_raid
-echo "nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
-{%- else %}
-exit 0
-{%- endif %}
-{%- else %}
-exit 0
+{%- endif %}
 {%- endif %}
+
+if [[ -n $SWRAID ]]; then
+  if [[ $SWRAID == '0' && $BOSSRAID == '0' ]]; then
+    RAIDSTATUS=0
+  else
+    RAIDSTATUS=1
+  fi
+elif [[ -n $DUDEYOUGOTADELL ]]; then
+  if [[ $BOSSRAID == '0' && $HWRAID == '0' ]]; then
+    RAIDSTATUS=0
+  else
+    RAIDSTATUS=1
+  fi
+elif [[ "$APPTYPE" == 'sm' ]]; then
+  if [[ -n "$HWRAID" ]]; then
+    RAIDSTATUS=0
+  else
+    RAIDSTATUS=1
+  fi
+fi
+
+echo "nsmraid=$RAIDSTATUS" > /opt/so/log/raid/status.log
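On an appliance, the net result of the rewritten script is a single machine-readable line; a healthy run would leave roughly this in place (illustrative output):

    $ cat /opt/so/log/raid/status.log
    nsmraid=0

Per the logic above, 0 means every monitored volume reported optimal/functional, and 1 flags a degraded array.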
@@ -1,13 +1,10 @@
 #!/bin/bash
-got_root() {
-
-  # Make sure you are root
-  if [ "$(id -u)" -ne 0 ]; then
-    echo "This script must be run using sudo!"
-    exit 1
-  fi
-
-}
-
-got_root
-docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat $1"
+
+. /usr/sbin/so-common
+
+argstr=""
+for arg in "$@"; do
+  argstr="${argstr} \"${arg}\""
+done
+
+docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"
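The re-quoting loop exists so that arguments whose values contain spaces survive the trip into the container, which the old single-argument $1 form could not guarantee. A hedged sketch (--local is an idstools-rulecat option; the path is made up):

    sudo so-rule-update --local "/nsm/rules/local rules"
    # expands inside the container to: idstools-rulecat --force --local "/nsm/rules/local rules"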
@@ -31,7 +31,7 @@ if [[ $# -lt 1 ]]; then
   echo "Usage: $0 <pcap-sample(s)>"
   echo
   echo "All PCAPs must be placed in the /opt/so/samples directory unless replaying"
-  echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP sampes"
+  echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP samples"
   echo "are located in the /opt/samples directory inside of the image."
   echo
   echo "Customer provided PCAP example:"
@@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then
 fi
 read -rs THEHIVE_PASS
 
-if ! check_password "$THEHIVE_PASS"; then
-  echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
-  exit 2
-fi
+check_password_and_exit "$THEHIVE_PASS"
 
 # Create new user in TheHive
 resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
salt/common/tools/sbin/so-thehive-user-update (new executable file, 57 lines)
@@ -0,0 +1,57 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

usage() {
  echo "Usage: $0 <user-name>"
  echo ""
  echo "Update password for an existing TheHive user. The new password will be read from STDIN."
  exit 1
}

if [ $# -ne 1 ]; then
  usage
fi

USER=$1

THEHIVE_KEY=$(lookup_pillar hivekey)
THEHVIE_API_URL="$(lookup_pillar url_base)/thehive/api"
THEHIVE_USER=$USER

# Read password for new user from stdin
test -t 0
if [[ $? == 0 ]]; then
  echo "Enter new password:"
fi
read -rs THEHIVE_PASS

if ! check_password "$THEHIVE_PASS"; then
  echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
  exit 2
fi

# Change password for user in TheHive
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user/${THEHIVE_USER}/password/set" -d "{\"password\" : \"$THEHIVE_PASS\"}")
if [[ -z "$resp" ]]; then
  echo "Successfully updated TheHive user password"
else
  echo "Unable to update TheHive user password"
  echo $resp
  exit 2
fi
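Typical use, assuming the user already exists in TheHive: run it interactively and type the password at the prompt, or pipe it in non-interactively (user name illustrative):

    sudo so-thehive-user-update analyst@example.com
    Enter new password:

    echo 'S0meStr0ngPass' | sudo so-thehive-user-update analyst@example.com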
@@ -18,11 +18,17 @@
 source $(dirname $0)/so-common
 
-if [[ $# -lt 1 || $# -gt 2 ]]; then
-  echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
+DEFAULT_ROLE=analyst
+
+if [[ $# -lt 1 || $# -gt 3 ]]; then
+  echo "Usage: $0 <operation> [email] [role]"
+  echo ""
+  echo " where <operation> is one of the following:"
   echo ""
   echo "   list: Lists all user email addresses currently defined in the identity system"
-  echo "    add: Adds a new user to the identity system; requires 'email' parameter"
+  echo "    add: Adds a new user to the identity system; requires 'email' parameter, while 'role' parameter is optional and defaults to $DEFAULT_ROLE"
+  echo "addrole: Grants a role to an existing user; requires 'email' and 'role' parameters"
+  echo "delrole: Removes a role from an existing user; requires 'email' and 'role' parameters"
   echo " update: Updates a user's password; requires 'email' parameter"
   echo " enable: Enables a user; requires 'email' parameter"
   echo "disable: Disables a user; requires 'email' parameter"
@@ -36,14 +42,18 @@ fi
 operation=$1
 email=$2
+role=$3
 
 kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434}
 databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite}
 bcryptRounds=${BCRYPT_ROUNDS:-12}
 elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
 elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles}
+socRolesFile=${SOC_ROLES_FILE:-/opt/so/conf/soc/soc_users_roles}
 esUID=${ELASTIC_UID:-930}
 esGID=${ELASTIC_GID:-930}
+soUID=${SOCORE_UID:-939}
+soGID=${SOCORE_GID:-939}
 
 function lock() {
   # Obtain file descriptor lock
@@ -80,7 +90,7 @@ function findIdByEmail() {
   email=$1
 
   response=$(curl -Ss -L ${kratosUrl}/identities)
-  identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
+  identityId=$(echo "${response}" | jq -r ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
   echo $identityId
 }
@@ -89,17 +99,20 @@ function validatePassword() {
 
   len=$(expr length "$password")
   if [[ $len -lt 6 ]]; then
-    echo "Password does not meet the minimum requirements"
-    exit 2
+    fail "Password does not meet the minimum requirements"
   fi
+
+  check_password_and_exit "$password"
 }
 
 function validateEmail() {
   email=$1
   # (?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])
   if [[ ! "$email" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ ]]; then
-    echo "Email address is invalid"
-    exit 3
+    fail "Email address is invalid"
+  fi
+
+  if [[ "$email" =~ [A-Z] ]]; then
+    fail "Email addresses cannot contain uppercase letters"
   fi
 }
@@ -127,21 +140,47 @@ function updatePassword() {
     validatePassword "$password"
   fi
 
-  if [[ -n $identityId ]]; then
+  if [[ -n "$identityId" ]]; then
     # Generate password hash
     passwordHash=$(hashPassword "$password")
     # Update DB with new hash
-    echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
+    echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
     [[ $? != 0 ]] && fail "Unable to update password"
   fi
 }
 
-function createElasticFile() {
+function createFile() {
   filename=$1
-  tmpFile=${filename}
-  truncate -s 0 "$tmpFile"
-  chmod 600 "$tmpFile"
-  chown "${esUID}:${esGID}" "$tmpFile"
+  uid=$2
+  gid=$3
+
+  mkdir -p $(dirname "$filename")
+  truncate -s 0 "$filename"
+  chmod 600 "$filename"
+  chown "${uid}:${gid}" "$filename"
+}
+
+function ensureRoleFileExists() {
+  if [[ ! -f "$socRolesFile" || ! -s "$socRolesFile" ]]; then
+    # Generate the new users file
+    rolesTmpFile="${socRolesFile}.tmp"
+    createFile "$rolesTmpFile" "$soUID" "$soGID"
+
+    if [[ -f "$databasePath" ]]; then
+      echo "Migrating roles to new file: $socRolesFile"
+
+      echo "select 'superuser:' || id from identities;" | sqlite3 "$databasePath" \
+        >> "$rolesTmpFile"
+      [[ $? != 0 ]] && fail "Unable to read identities from database"
+
+      echo "The following users have all been migrated with the super user role:"
+      cat "${rolesTmpFile}"
+    else
+      echo "Database file does not exist yet, installation is likely not yet complete."
+    fi
+
+    mv "${rolesTmpFile}" "${socRolesFile}"
+  fi
 }
 
 function syncElasticSystemUser() {
@@ -172,33 +211,31 @@ function syncElasticSystemRole() {
 }
 
 function syncElastic() {
-  echo "Syncing users between SOC and Elastic..."
+  echo "Syncing users and roles between SOC and Elastic..."
 
   usersTmpFile="${elasticUsersFile}.tmp"
+  createFile "${usersTmpFile}" "$esUID" "$esGID"
   rolesTmpFile="${elasticRolesFile}.tmp"
-  createElasticFile "${usersTmpFile}"
-  createElasticFile "${rolesTmpFile}"
+  createFile "${rolesTmpFile}" "$esUID" "$esGID"
 
   authPillarJson=$(lookup_salt_value "auth" "elasticsearch" "pillar" "json")
 
   syncElasticSystemUser "$authPillarJson" "so_elastic_user" "$usersTmpFile"
-  syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"
-
   syncElasticSystemUser "$authPillarJson" "so_kibana_user" "$usersTmpFile"
-  syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"
-
   syncElasticSystemUser "$authPillarJson" "so_logstash_user" "$usersTmpFile"
-  syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"
-
   syncElasticSystemUser "$authPillarJson" "so_beats_user" "$usersTmpFile"
-  syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"
-
   syncElasticSystemUser "$authPillarJson" "so_monitor_user" "$usersTmpFile"
+
+  syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"
+  syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"
+  syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"
+  syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"
   syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_collector" "$rolesTmpFile"
   syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_agent" "$rolesTmpFile"
   syncElasticSystemRole "$authPillarJson" "so_monitor_user" "monitoring_user" "$rolesTmpFile"
 
-  if [[ -f "$databasePath" ]]; then
-    # Generate the new users file
+  if [[ -f "$databasePath" && -f "$socRolesFile" ]]; then
+    # Append the SOC users
     echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \
       "from identity_credential_identifiers ici, identity_credentials ic " \
       "where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \
@@ -208,17 +245,18 @@ function syncElastic() {
       >> "$usersTmpFile"
     [[ $? != 0 ]] && fail "Unable to read credential hashes from database"
 
-    # Generate the new users_roles file
-    echo "select 'superuser:' || ici.identifier " \
-      "from identity_credential_identifiers ici, identity_credentials ic " \
-      "where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \
-      "order by ici.identifier;" | \
-      sqlite3 "$databasePath" \
-      >> "$rolesTmpFile"
-    [[ $? != 0 ]] && fail "Unable to read credential IDs from database"
+    # Append the user roles
+    while IFS="" read -r rolePair || [ -n "$rolePair" ]; do
+      userId=$(echo "$rolePair" | cut -d: -f2)
+      role=$(echo "$rolePair" | cut -d: -f1)
+      echo "select '$role:' || ici.identifier " \
+        "from identity_credential_identifiers ici, identity_credentials ic " \
+        "where ici.identity_credential_id=ic.id and ic.identity_id = '$userId';" | \
+        sqlite3 "$databasePath" >> "$rolesTmpFile"
+    done < "$socRolesFile"
 
   else
-    echo "Database file does not exist yet, skipping users export"
+    echo "Database file or soc roles file does not exist yet, skipping users export"
   fi
 
   if [[ -s "${usersTmpFile}" ]]; then
@@ -236,15 +274,22 @@ function syncElastic() {
 }
 
 function syncAll() {
+  ensureRoleFileExists
+
+  # Check if a sync is needed. Sync is not needed if the following are true:
+  # - user database entries are all older than the elastic users file
+  # - soc roles file last modify date is older than the elastic roles file
   if [[ -z "$FORCE_SYNC" && -f "$databasePath" && -f "$elasticUsersFile" ]]; then
     usersFileAgeSecs=$(echo $(($(date +%s) - $(date +%s -r "$elasticUsersFile"))))
     staleCount=$(echo "select count(*) from identity_credentials where updated_at >= Datetime('now', '-${usersFileAgeSecs} seconds');" \
       | sqlite3 "$databasePath")
-    if [[ "$staleCount" == "0" ]]; then
+    if [[ "$staleCount" == "0" && "$elasticRolesFile" -nt "$socRolesFile" ]]; then
       return 1
     fi
   fi
 
   syncElastic
 
   return 0
 }
@@ -252,11 +297,64 @@ function listUsers() {
   response=$(curl -Ss -L ${kratosUrl}/identities)
   [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
 
-  echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
+  users=$(echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort)
+  for user in $users; do
+    roles=$(grep "$user" "$elasticRolesFile" | cut -d: -f1 | tr '\n' ' ')
+    echo "$user: $roles"
+  done
+}
+
+function addUserRole() {
+  email=$1
+  role=$2
+
+  adjustUserRole "$email" "$role" "add"
+}
+
+function deleteUserRole() {
+  email=$1
+  role=$2
+
+  adjustUserRole "$email" "$role" "del"
+}
+
+function adjustUserRole() {
+  email=$1
+  role=$2
+  op=$3
+
+  identityId=$(findIdByEmail "$email")
+  [[ ${identityId} == "" ]] && fail "User not found"
+
+  ensureRoleFileExists
+
+  filename="$socRolesFile"
+  hasRole=0
+  grep "$role:" "$socRolesFile" | grep -q "$identityId" && hasRole=1
+  if [[ "$op" == "add" ]]; then
+    if [[ "$hasRole" == "1" ]]; then
+      echo "User '$email' already has the role: $role"
+      return 1
+    else
+      echo "$role:$identityId" >> "$filename"
+    fi
+  elif [[ "$op" == "del" ]]; then
+    if [[ "$hasRole" -ne 1 ]]; then
+      fail "User '$email' does not have the role: $role"
+    else
+      sed "/^$role:$identityId\$/d" "$filename" > "$filename.tmp"
+      cat "$filename".tmp > "$filename"
+      rm -f "$filename".tmp
+    fi
+  else
+    fail "Unsupported role adjustment operation: $op"
+  fi
+  return 0
 }
 
 function createUser() {
   email=$1
+  role=$2
+
   now=$(date -u +%FT%TZ)
   addUserJson=$(cat <<EOF
@@ -270,16 +368,17 @@ EOF
   response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson")
   [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
 
-  identityId=$(echo "${response}" | jq ".id")
-  if [[ ${identityId} == "null" ]]; then
+  identityId=$(echo "${response}" | jq -r ".id")
+  if [[ "${identityId}" == "null" ]]; then
     code=$(echo "${response}" | jq ".error.code")
     [[ "${code}" == "409" ]] && fail "User already exists"
 
     reason=$(echo "${response}" | jq ".error.message")
     [[ $? == 0 ]] && fail "Unable to add user: ${reason}"
+  else
+    updatePassword "$identityId"
+    addUserRole "$email" "$role"
   fi
-
-  updatePassword $identityId
 }
 
 function updateStatus() {
@@ -292,21 +391,21 @@ function updateStatus() {
   response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
   [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
 
-  oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
+  oldConfig=$(echo "select config from identity_credentials where identity_id='${identityId}';" | sqlite3 "$databasePath")
   if [[ "$status" == "locked" ]]; then
     config=$(echo $oldConfig | sed -e 's/hashed/locked/')
-    echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
+    echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
     [[ $? != 0 ]] && fail "Unable to lock credential record"
 
-    echo "delete from sessions where identity_id=${identityId};" | sqlite3 "$databasePath"
+    echo "delete from sessions where identity_id='${identityId}';" | sqlite3 "$databasePath"
     [[ $? != 0 ]] && fail "Unable to invalidate sessions"
   else
     config=$(echo $oldConfig | sed -e 's/locked/hashed/')
-    echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
+    echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
     [[ $? != 0 ]] && fail "Unable to unlock credential record"
   fi
 
-  updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
+  updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url) | del(.created_at) | del(.updated_at)")
   response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson")
   [[ $? != 0 ]] && fail "Unable to mark user as locked"
@@ -318,7 +417,7 @@ function updateUser() {
   identityId=$(findIdByEmail "$email")
   [[ ${identityId} == "" ]] && fail "User not found"
 
-  updatePassword $identityId
+  updatePassword "$identityId"
 }
 
 function deleteUser() {
@@ -329,6 +428,11 @@ function deleteUser() {
 
   response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId")
   [[ $? != 0 ]] && fail "Unable to communicate with Kratos"
+
+  rolesTmpFile="${socRolesFile}.tmp"
+  createFile "$rolesTmpFile" "$soUID" "$soGID"
+  grep -v "$id" "$socRolesFile" > "$rolesTmpFile"
+  mv "$rolesTmpFile" "$socRolesFile"
 }
 
 case "${operation}" in
@@ -339,7 +443,7 @@ case "${operation}" in
     lock
     validateEmail "$email"
     updatePassword
-    createUser "$email"
+    createUser "$email" "${role:-$DEFAULT_ROLE}"
     syncAll
     echo "Successfully added new user to SOC"
     check_container thehive && echo "$password" | so-thehive-user-add "$email"
@@ -351,6 +455,31 @@ case "${operation}" in
     listUsers
     ;;
 
+  "addrole")
+    verifyEnvironment
+    [[ "$email" == "" ]] && fail "Email address must be provided"
+    [[ "$role" == "" ]] && fail "Role must be provided"
+
+    lock
+    validateEmail "$email"
+    if addUserRole "$email" "$role"; then
+      syncElastic
+      echo "Successfully added role to user"
+    fi
+    ;;
+
+  "delrole")
+    verifyEnvironment
+    [[ "$email" == "" ]] && fail "Email address must be provided"
+    [[ "$role" == "" ]] && fail "Role must be provided"
+
+    lock
+    validateEmail "$email"
+    deleteUserRole "$email" "$role"
+    syncElastic
+    echo "Successfully removed role from user"
+    ;;
+
   "update")
     verifyEnvironment
     [[ "$email" == "" ]] && fail "Email address must be provided"
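Based on the new case entries above, role management would look roughly like this (email and role values are illustrative):

    sudo so-user add analyst@example.com
    sudo so-user addrole analyst@example.com superuser
    sudo so-user delrole analyst@example.com superuser

Each role change rewrites the soc_users_roles file and re-syncs it into the Elastic users_roles file via syncElastic.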
@@ -1,5 +1,4 @@
 #!/bin/bash
-
 # Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
 #
 # This program is free software: you can redistribute it and/or modify
@@ -20,13 +19,8 @@ echo "Starting to check for yara rule updates at $(date)..."
 
 output_dir="/opt/so/saltstack/default/salt/strelka/rules"
 mkdir -p $output_dir
 
 repos="$output_dir/repos.txt"
-ignorefile="$output_dir/ignore.txt"
-
-deletecounter=0
 newcounter=0
-updatecounter=0
 
 {% if ISAIRGAP is sameas true %}
@@ -35,58 +29,21 @@ echo "Airgap mode enabled."
 clone_dir="/nsm/repo/rules/strelka"
 repo_name="signature-base"
 mkdir -p /opt/so/saltstack/default/salt/strelka/rules/signature-base
+# Ensure a copy of the license is available for the rules
 [ -f $clone_dir/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
 
 # Copy over rules
 for i in $(find $clone_dir/yara -name "*.yar*"); do
   rule_name=$(echo $i | awk -F '/' '{print $NF}')
-  repo_sum=$(sha256sum $i | awk '{print $1}')
-
-  # Check rules against those in ignore list -- don't copy if ignored.
-  if ! grep -iq $rule_name $ignorefile; then
-    existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
-
-    # For existing rules, check to see if they need to be updated, by comparing checksums
-    if [ $existing_rules -gt 0 ];then
-      local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
-      if [ "$repo_sum" != "$local_sum" ]; then
-        echo "Checksums do not match!"
-        echo "Updating $rule_name..."
-        cp $i $output_dir/$repo_name;
-        ((updatecounter++))
-      fi
-    else
-      # If rule doesn't exist already, we'll add it
-      echo "Adding new rule: $rule_name..."
-      cp $i $output_dir/$repo_name
-      ((newcounter++))
-    fi
-  fi;
-done
-
-# Check to see if we have any old rules that need to be removed
-for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
-  is_repo_rule=$(find $clone_dir -name "$i" | wc -l)
-  if [ $is_repo_rule -eq 0 ]; then
-    echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
-    rm $output_dir/$repo_name/$i
-    ((deletecounter++))
-  fi
+  echo "Adding rule: $rule_name..."
+  cp $i $output_dir/$repo_name
+  ((newcounter++))
 done
 
 echo "Done!"
 
 if [ "$newcounter" -gt 0 ];then
-  echo "$newcounter new rules added."
-fi
-
-if [ "$updatecounter" -gt 0 ];then
-  echo "$updatecounter rules updated."
-fi
-
-if [ "$deletecounter" -gt 0 ];then
-  echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
+  echo "$newcounter rules added."
 fi
 
 {% else %}
@@ -99,69 +56,32 @@ if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
     if ! $(echo "$repo" | grep -qE '^#'); then
       # Remove old repo if existing bc of previous error condition or unexpected disruption
       repo_name=`echo $repo | awk -F '/' '{print $NF}'`
-      [ -d $repo_name ] && rm -rf $repo_name
+      [ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name
 
       # Clone repo and make appropriate directories for rules
       git clone $repo $clone_dir/$repo_name
       echo "Analyzing rules from $clone_dir/$repo_name..."
       mkdir -p $output_dir/$repo_name
+      # Ensure a copy of the license is available for the rules
       [ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
 
       # Copy over rules
       for i in $(find $clone_dir/$repo_name -name "*.yar*"); do
         rule_name=$(echo $i | awk -F '/' '{print $NF}')
-        repo_sum=$(sha256sum $i | awk '{print $1}')
-
-        # Check rules against those in ignore list -- don't copy if ignored.
-        if ! grep -iq $rule_name $ignorefile; then
-          existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
-
-          # For existing rules, check to see if they need to be updated, by comparing checksums
-          if [ $existing_rules -gt 0 ];then
-            local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
-            if [ "$repo_sum" != "$local_sum" ]; then
-              echo "Checksums do not match!"
-              echo "Updating $rule_name..."
-              cp $i $output_dir/$repo_name;
-              ((updatecounter++))
-            fi
-          else
-            # If rule doesn't exist already, we'll add it
-            echo "Adding new rule: $rule_name..."
-            cp $i $output_dir/$repo_name
-            ((newcounter++))
-          fi
-        fi;
-      done
-
-      # Check to see if we have any old rules that need to be removed
-      for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
-        is_repo_rule=$(find $clone_dir/$repo_name -name "$i" | wc -l)
-        if [ $is_repo_rule -eq 0 ]; then
-          echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
-          rm $output_dir/$repo_name/$i
-          ((deletecounter++))
-        fi
+        echo "Adding rule: $rule_name..."
+        cp $i $output_dir/$repo_name
+        ((newcounter++))
       done
       rm -rf $clone_dir/$repo_name
     fi
   done < $repos
 
 echo "Done!"
 
 if [ "$newcounter" -gt 0 ];then
-  echo "$newcounter new rules added."
+  echo "$newcounter rules added."
 fi
 
-if [ "$updatecounter" -gt 0 ];then
-  echo "$updatecounter rules updated."
-fi
-
-if [ "$deletecounter" -gt 0 ];then
-  echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
-fi
-
 else
   echo "Server returned $gh_status status code."
   echo "No connectivity to Github...exiting..."
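The non-airgap branch iterates a plain-text list of rule repositories; lines beginning with # are skipped by the grep guard above. An illustrative repos.txt entry (the signature-base name is taken from the airgap branch of this same script):

    # /opt/so/saltstack/default/salt/strelka/rules/repos.txt
    https://github.com/Neo23x0/signature-base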
@@ -27,6 +27,7 @@ SOUP_LOG=/root/soup.log
 INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
 WHATWOULDYOUSAYYAHDOHERE=soup
 whiptail_title='Security Onion UPdater'
+NOTIFYCUSTOMELASTICCONFIG=false
 
 check_err() {
   local exit_code=$1
@@ -105,17 +106,20 @@ add_common() {
 
 airgap_mounted() {
   # Let's see if the ISO is already mounted.
-  if [ -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
+  if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
     echo "The ISO is already mounted"
   else
-    echo ""
-    cat << EOF
+    if [[ -z $ISOLOC ]]; then
+      echo "This is airgap. Ask for a location."
+      echo ""
+      cat << EOF
 In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.
 For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
 Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.
 
 EOF
-    read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
+      read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
+    fi
     if [[ -f $ISOLOC ]]; then
       # Mounting the ISO image
       mkdir -p /tmp/soagupdate
@@ -131,7 +135,7 @@ EOF
     elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
       ln -s $ISOLOC /tmp/soagupdate
       echo "Found the update content"
-    else
+    elif [[ -b $ISOLOC ]]; then
       mkdir -p /tmp/soagupdate
       mount $ISOLOC /tmp/soagupdate
       if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
@@ -140,7 +144,11 @@ EOF
         exit 0
       else
         echo "Device has been mounted!"
       fi
+    else
+      echo "Could not find Security Onion ISO content at ${ISOLOC}"
+      echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
+      exit 0
     fi
   fi
 }
@@ -150,7 +158,7 @@ airgap_update_dockers() {
   # Let's copy the tarball
   if [[ ! -f $AGDOCKER/registry.tar ]]; then
     echo "Unable to locate registry. Exiting"
-    exit 1
+    exit 0
   else
     echo "Stopping the registry docker"
     docker stop so-dockerregistry
@@ -182,6 +190,37 @@ check_airgap() {
   fi
 }
 
+# {% raw %}
+
+check_local_mods() {
+  local salt_local=/opt/so/saltstack/local
+
+  local_mod_arr=()
+
+  while IFS= read -r -d '' local_file; do
+    stripped_path=${local_file#"$salt_local"}
+    default_file="${DEFAULT_SALT_DIR}${stripped_path}"
+    if [[ -f $default_file ]]; then
+      file_diff=$(diff "$default_file" "$local_file" )
+      if [[ $(echo "$file_diff" | grep -c "^<") -gt 0 ]]; then
+        local_mod_arr+=( "$local_file" )
+      fi
+    fi
+  done< <(find $salt_local -type f -print0)
+
+  if [[ ${#local_mod_arr} -gt 0 ]]; then
+    echo "Potentially breaking changes found in the following files (check ${DEFAULT_SALT_DIR} for original copy):"
+    for file_str in "${local_mod_arr[@]}"; do
+      echo "  $file_str"
+    done
+    echo ""
+    echo "To reference this list later, check $SOUP_LOG"
+    sleep 10
+  fi
+}
+
+# {% endraw %}
+
 check_sudoers() {
   if grep -q "so-setup" /etc/sudoers; then
     echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
@@ -251,25 +290,31 @@ check_os_updates() {
     OSUPDATES=$(yum -q list updates | wc -l)
   fi
   if [[ "$OSUPDATES" -gt 0 ]]; then
-    echo $NEEDUPDATES
-    echo ""
-    read -p "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
-    if [[ "$confirm" == [cC] ]]; then
-      echo "Continuing without updating packages"
-    elif [[ "$confirm" == [uU] ]]; then
-      echo "Applying Grid Updates"
-      set +e
-      run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
-      set -e
+    if [[ -z $UNATTENDED ]]; then
+      echo "$NEEDUPDATES"
+      echo ""
+      read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
+      if [[ "$confirm" == [cC] ]]; then
+        echo "Continuing without updating packages"
+      elif [[ "$confirm" == [uU] ]]; then
+        echo "Applying Grid Updates"
+        update_flag=true
+      else
+        echo "Exiting soup"
+        exit 0
+      fi
     else
-      echo "Exiting soup"
-      exit 0
+      update_flag=true
     fi
   else
     echo "Looks like you have an updated OS"
   fi
+
+  if [[ $update_flag == true ]]; then
+    set +e
+    run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
+    set -e
+  fi
 }
 
 clean_dockers() {
@@ -341,6 +386,7 @@ preupgrade_changes() {
   [[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20
   [[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
   [[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50
+  [[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_2.3.5X_to_2.3.80
   true
 }
@@ -578,6 +624,46 @@ EOF
   INSTALLEDVERSION=2.3.50
 }
 
+up_2.3.5X_to_2.3.80() {
+
+  # Remove watermark settings from global.sls
+  sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls
+
+  # Add new indices to the global
+  sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
+  sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
+  sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
+  sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
+
+  # Do some pillar formatting
+  tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print tolower($2)'}| xargs)
+
+  if [[ "$tc" == "true" ]]; then
+    tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print $2'})
+    sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls
+    sed -i '/ true_cluster_name/d' /opt/so/saltstack/local/pillar/global.sls
+    sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls
+
+    for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
+      if [[ ${file} != *"manager.sls"* ]]; then
+        noderoutetype=$(grep -w node_route_type $file | awk -F: {'print $2'})
+        if [ -n "$noderoutetype" ]; then
+          sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file
+          sed -i '/ node_route_type/d' $file
+          noderoutetype=''
+        fi
+      fi
+    done
+  fi
+
+  # check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar
+  if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then
+    NOTIFYCUSTOMELASTICCONFIG=true
+  fi
+
+  INSTALLEDVERSION=2.3.80
+}
+
 verify_upgradespace() {
   CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
   if [ "$CURRENTSPACE" -lt "10" ]; then
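For orientation, the cluster-name migration above is intended to turn a flat pillar entry like true_cluster_name: so-cluster into roughly the following structure in global.sls (a sketch; the exact indentation comes from the sed expressions):

    elasticsearch:
      config:
        cluster:
          name: so-cluster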
@@ -593,7 +679,7 @@ upgrade_space() {
     clean_dockers
     if ! verify_upgradespace; then
       echo "There is not enough space to perform the upgrade. Please free up space and try again"
-      exit 1
+      exit 0
     fi
   else
     echo "You have enough space for upgrade. Proceeding with soup."
@@ -618,8 +704,8 @@ thehive_maint() {
   done
   if [ "$THEHIVE_CONNECTED" == "yes" ]; then
     echo "Migrating thehive databases if needed."
-    curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate"
-    curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate"
+    curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1
+    curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1
   fi
 }
@@ -743,39 +829,23 @@ verify_latest_update_script() {
 }
 
 main() {
-  set -e
+  set +e
   trap 'check_err $?' EXIT
 
-  echo "### Preparing soup at $(date) ###"
-  while getopts ":b" opt; do
-    case "$opt" in
-      b ) # process option b
-        shift
-        BATCHSIZE=$1
-        if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then
-          echo "Batch size must be a number greater than 0."
-          exit 1
-        fi
-        ;;
-      \? )
-        echo "Usage: cmd [-b]"
-        ;;
-    esac
-  done
+  echo "Checking to see if this is an airgap install."
+  echo ""
+  check_airgap
+  if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
+    echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
+    exit 0
+  fi
 
   echo "Checking to see if this is a manager."
   echo ""
   require_manager
   set_minionid
-  echo "Checking to see if this is an airgap install."
-  echo ""
-  check_airgap
   echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
   echo ""
   if [[ $is_airgap -eq 0 ]]; then
     # Let's mount the ISO since this is airgap
-    echo "This is airgap. Ask for a location."
     airgap_mounted
   else
     echo "Cloning Security Onion github repo into $UPDATE_DIR."
@@ -863,7 +933,7 @@ main() {
     echo "Once the issue is resolved, run soup again."
     echo "Exiting."
     echo ""
-    exit 1
+    exit 0
   else
     echo "Salt upgrade success."
     echo ""
@@ -922,8 +992,6 @@ main() {
   set +e
   salt-call state.highstate -l info queue=True
   set -e
-  echo ""
-  echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
 
   echo ""
   echo "Stopping Salt Master to remove ACL"
@@ -946,6 +1014,13 @@ main() {
   [[ $is_airgap -eq 0 ]] && unmount_update
   thehive_maint
 
+  echo ""
+  echo "Upgrade to $NEWVERSION complete."
+
+  # Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
+  set +e
+
+  echo "Checking the number of minions."
   NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
   if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
     if [[ $is_airgap -eq 0 ]]; then
@@ -956,6 +1031,10 @@ main() {
     fi
   fi
 
+  echo "Checking for local modifications."
+  check_local_mods
+
+  echo "Checking sudoers file."
   check_sudoers
 
   if [[ -n $lsl_msg ]]; then
@@ -993,10 +1072,56 @@ EOF
     fi
   fi

+  if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then
+
+    cat << EOF
+
+
+A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.
+
+If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at https://docs.securityonion.net/en/2.3/elasticsearch.html.
+
+EOF
+
+  fi
+
   echo "### soup has been served at $(date) ###"
 }

-cat << EOF
+while getopts ":b:f:y" opt; do
+  case ${opt} in
+    b )
+      BATCHSIZE="$OPTARG"
+      if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
+        echo "Batch size must be a number greater than 0."
+        exit 1
+      fi
+      ;;
+    y )
+      if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
+        echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
+        exit 1
+      else
+        UNATTENDED=true
+      fi
+      ;;
+    f )
+      ISOLOC="$OPTARG"
+      ;;
+    \? )
+      echo "Usage: soup [-b] [-y] [-f <iso location>]"
+      exit 1
+      ;;
+    : )
+      echo "Invalid option: $OPTARG requires an argument"
+      exit 1
+      ;;
+  esac
+done
+shift $((OPTIND - 1))
+
+if [[ -z $UNATTENDED ]]; then
+  cat << EOF

 SOUP - Security Onion UPdater

@@ -1008,7 +1133,9 @@ Press Enter to continue or Ctrl-C to cancel.

 EOF

 read -r input
+fi

+echo "### Preparing soup at $(date) ###"
 main "$@" | tee -a $SOUP_LOG

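For context on the reworked option handling above: -b now takes its value from $OPTARG instead of a manual shift, and the tightened check ^[1-9][0-9]*$ rejects a batch size of 0, matching the error message. A minimal usage sketch (invocations are illustrative; the ISO path is a placeholder):

    soup                                          # interactive upgrade, prompts before proceeding
    soup -y                                       # unattended; requires /opt/so/state/yeselastic.txt from a prior manual run
    soup -y -f /path/to/securityonion.iso -b 5    # unattended airgap upgrade with a batch size of 5

The -b value is presumably fed into Salt's batching when more than one minion needs the highstate; that wiring is outside the lines shown here.
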
salt/curator/files/action/so-aws-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-aws:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close aws indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-aws.*|so-aws.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:
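Each of the new curator action files in this changeset follows the same pattern as the one above: a Jinja pillar.get with a built-in default (30 days for close, 7 for warm, 365 for delete), so retention can be tuned per deployment from the Salt pillar without editing the action file itself. A minimal override sketch, assuming it is merged into the local pillar (the exact .sls file it belongs in is deployment-specific):

    elasticsearch:
      index_settings:
        so-aws:
          close: 14    # close so-aws indices after 14 days instead of the default 30
          warm: 3      # move to warm nodes after 3 days instead of 7
          delete: 180  # delete after 180 days instead of 365
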
salt/curator/files/action/so-aws-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete aws indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-aws.*|so-aws.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:
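These action files are consumed by Elasticsearch Curator, which Security Onion runs on its own schedule; invoked standalone, the equivalent would be something like the following (config path hypothetical):

    curator --config /opt/so/conf/curator/curator.yml so-aws-delete.yml

Note that ignore_empty_list: True keeps a run from erroring out when no index matches the filters yet.
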
salt/curator/files/action/so-aws-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-aws
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
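The warm actions rely on Curator's allocation action: the key/value/allocation_type options are applied to each matched index as a routing requirement, roughly equivalent to setting

    index.routing.allocation.require.box_type: warm

on the index, so its shards migrate to nodes that declare box_type: warm. This assumes hot/warm nodes are tagged with that attribute, which is handled outside these files.
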
salt/curator/files/action/so-azure-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-azure:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close azure indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-azure.*|so-azure.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-azure-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete azure indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-azure.*|so-azure.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-azure-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-azure
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-barracuda-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close barracuda indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-barracuda.*|so-barracuda.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-barracuda-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete barracuda indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-barracuda.*|so-barracuda.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-barracuda-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-barracuda
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-beats-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete beats indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-beats.*|so-beats.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-beats-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-beats
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-bluecoat-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close bluecoat indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-bluecoat-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete bluecoat indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-bluecoat-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-bluecoat
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-cef-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cef:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close cef indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-cef.*|so-cef.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-cef-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete cef indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-cef.*|so-cef.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-cef-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-cef
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-checkpoint-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close checkpoint indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-checkpoint-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete checkpoint indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-checkpoint-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-checkpoint
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-cisco-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cisco:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close cisco indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-cisco.*|so-cisco.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-cisco-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete cisco indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-cisco.*|so-cisco.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-cisco-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-cisco
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-cyberark-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close cyberark indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-cyberark.*|so-cyberark.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-cyberark-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete cyberark indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-cyberark.*|so-cyberark.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-cyberark-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-cyberark
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-cylance-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cylance:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close cylance indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-cylance.*|so-cylance.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-cylance-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete cylance indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-cylance.*|so-cylance.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-cylance-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-cylance
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-elasticsearch-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close elasticsearch indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-elasticsearch-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete elasticsearch indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-elasticsearch-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-elasticsearch
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-f5-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-f5:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close f5 indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-f5.*|so-f5.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-f5-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-f5:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete f5 indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-f5.*|so-f5.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-f5-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-f5:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-f5
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-firewall-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete firewall indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-firewall.*|so-firewall.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-firewall-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-firewall
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-fortinet-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close fortinet indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-fortinet.*|so-fortinet.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-fortinet-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete fortinet indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-fortinet.*|so-fortinet.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-fortinet-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-fortinet
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-gcp-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-gcp:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close gcp indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-gcp.*|so-gcp.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-gcp-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete gcp indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-gcp.*|so-gcp.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-gcp-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-gcp
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-google_workspace-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:close', 30) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: close
+    description: >-
+      Close google_workspace indices older than {{cur_close_days}} days.
+    options:
+      delete_aliases: False
+      timeout_override:
+      continue_if_exception: False
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-google_workspace.*|so-google_workspace.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{cur_close_days}}
+      exclude:

salt/curator/files/action/so-google_workspace-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete google_workspace indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-google_workspace.*|so-google_workspace.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-google_workspace-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-google_workspace
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-ids-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
+{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-ids:delete', 365) -%}
+---
+# Remember, leave a key empty if there is no value. None will be a string,
+# not a Python "NoneType"
+#
+# Also remember that all examples have 'disable_action' set to True. If you
+# want to use this action as a template, be sure to set this to False after
+# copying it.
+actions:
+  1:
+    action: delete_indices
+    description: >-
+      Delete IDS indices when older than {{ DELETE_DAYS }} days.
+    options:
+      ignore_empty_list: True
+      disable_action: False
+    filters:
+    - filtertype: pattern
+      kind: regex
+      value: '^(logstash-ids.*|so-ids.*)$'
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ DELETE_DAYS }}
+      exclude:

salt/curator/files/action/so-ids-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
+{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-ids:warm', 7) -%}
+actions:
+  1:
+    action: allocation
+    description: "Apply shard allocation filtering rules to the specified indices"
+    options:
+      key: box_type
+      value: warm
+      allocation_type: require
+      wait_for_completion: true
+      timeout_override:
+      continue_if_exception: false
+      disable_action: false
+    filters:
+    - filtertype: pattern
+      kind: prefix
+      value: so-ids
+    - filtertype: age
+      source: name
+      direction: older
+      timestring: '%Y.%m.%d'
+      unit: days
+      unit_count: {{ WARM_DAYS }}
29 salt/curator/files/action/so-imperva-close.yml Normal file
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-imperva:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close imperva indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-imperva.*|so-imperva.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:
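Action files like the ones in this diff are only half of a curator run; curator also reads a client configuration that tells it which Elasticsearch host to talk to. A minimal, hypothetical client config for reference (host, port, and TLS values here are placeholders, not taken from this repository):

client:
  hosts:
    - 127.0.0.1   # placeholder; point at the Elasticsearch node curator should manage
  port: 9200
  use_ssl: False
  timeout: 30
logging:
  loglevel: INFO
  logformat: default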
29 salt/curator/files/action/so-imperva-delete.yml Normal file
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-imperva:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete imperva indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-imperva.*|so-imperva.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
24 salt/curator/files/action/so-imperva-warm.yml Normal file
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-imperva:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-imperva
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
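Taken together, the three imperva files above implement a simple index lifecycle driven entirely by pillar values: shards move to warm nodes first, the index is closed later, and it is finally deleted. A sketch of the corresponding pillar keys with the defaults used in these templates (7, 30, and 365 days):

elasticsearch:
  index_settings:
    so-imperva:
      warm: 7      # allocation action: relocate shards to box_type=warm nodes
      close: 30    # close action: close the index to free heap, data stays on disk
      delete: 365  # delete_indices action: remove the index permanently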
29 salt/curator/files/action/so-import-delete.yml Normal file
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-import:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete import indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-import.*|so-import.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
24 salt/curator/files/action/so-import-warm.yml Normal file
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-import:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-import
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
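Note the asymmetry between the filter kinds: the warm actions match only the new `so-` index names with a prefix filter, while the close and delete actions use a regex that also catches legacy `logstash-` indices. The practical effect, sketched as comments with hypothetical index names:

# so-import-2021.11.03        matched by prefix 'so-import'  -> warmed, closed, deleted
# logstash-import-2021.11.03  matched by the regex only      -> closed, deleted (never warmed)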
29 salt/curator/files/action/so-infoblox-close.yml Normal file
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close infoblox indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-infoblox.*|so-infoblox.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:
29 salt/curator/files/action/so-infoblox-delete.yml Normal file
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete infoblox indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-infoblox.*|so-infoblox.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
24 salt/curator/files/action/so-infoblox-warm.yml Normal file
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-infoblox:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-infoblox
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
29 salt/curator/files/action/so-juniper-close.yml Normal file
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-juniper:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close juniper indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-juniper.*|so-juniper.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:
29 salt/curator/files/action/so-juniper-delete.yml Normal file
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-juniper:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete juniper indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-juniper.*|so-juniper.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
24 salt/curator/files/action/so-juniper-warm.yml Normal file
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-juniper:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-juniper
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
29 salt/curator/files/action/so-kibana-close.yml Normal file
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-kibana:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close kibana indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-kibana.*|so-kibana.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:
29 salt/curator/files/action/so-kibana-delete.yml Normal file
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kibana:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete kibana indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-kibana.*|so-kibana.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
24 salt/curator/files/action/so-kibana-warm.yml Normal file
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-kibana:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-kibana
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
29 salt/curator/files/action/so-logstash-close.yml Normal file
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-logstash:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close logstash indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-logstash.*|so-logstash.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:
29 salt/curator/files/action/so-logstash-delete.yml Normal file
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-logstash:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete logstash indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-logstash.*|so-logstash.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
24 salt/curator/files/action/so-logstash-warm.yml Normal file
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-logstash:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-logstash
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
29 salt/curator/files/action/so-microsoft-close.yml Normal file
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close microsoft indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-microsoft.*|so-microsoft.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:
29 salt/curator/files/action/so-microsoft-delete.yml Normal file
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete microsoft indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-microsoft.*|so-microsoft.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
24 salt/curator/files/action/so-microsoft-warm.yml Normal file
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-microsoft:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-microsoft
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
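These files live under salt/curator/files/action/ and are Jinja templates, so Salt has to render them before curator can read them. A hypothetical file.managed state showing the usual pattern (the state ID and destination path are illustrative, not taken from this diff):

curator_action_so-ids-delete:
  file.managed:
    - name: /opt/so/conf/curator/action/so-ids-delete.yml   # illustrative destination
    - source: salt://curator/files/action/so-ids-delete.yml
    - template: jinja   # resolves the pillar.get lookups at render time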
Some files were not shown because too many files have changed in this diff.