Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 09:12:45 +01:00)
Compare commits
1270 Commits
CONTRIBUTING.md (new file, 39 lines)
@@ -0,0 +1,39 @@
# Contributing to Security Onion

### Questions, suggestions, and general comments

* Security Onion uses GitHub's [Discussions](https://github.com/Security-Onion-Solutions/securityonion/discussions) to provide a forum where the community and developers can interact, as well as ask and answer questions.

### Reporting a bug

* The primary place to report unexpected behavior or possible bugs is the repo's [Discussions forum](https://github.com/Security-Onion-Solutions/securityonion/discussions).

* **If you are familiar with the current version of Security Onion and are confident you've discovered a bug**, first ensure there is not already an issue present by searching the open [issues](https://github.com/Security-Onion-Solutions/securityonion/issues). If there is, a thumbs up :+1: is a great way to show this bug is affecting you too.

* If an issue doesn't exist, [open a new one](https://github.com/Security-Onion-Solutions/securityonion/issues/new), following the directions in the issue template. This means including:
  * **System information** and how Security Onion was installed
  * **Log files** relevant to the bug report
  * **Reproduction steps**

### Contributing code

* **All commits must be signed** with a valid key that has been added to your GitHub account. The commits should all have the "**Verified**" tag when viewed on GitHub, as shown below (see the configuration sketch after this list):

  <img src="./assets/images/verified-commit-1.png" width="450">

* If an issue does not already exist for the bug or feature for which you are submitting a pull request, [create one](https://github.com/Security-Onion-Solutions/securityonion/issues/new) with the relevant prefix (**`FIX:`** for bug fixes, **`FEATURE:`** for new features).

* Link the PR to the related issue, either using [keywords](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) in the PR description, or [manually](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-issues/linking-a-pull-request-to-an-issue#manually-linking-a-pull-request-to-an-issue).

* **Pull requests should be opened against the `dev` branch of this repo**, and should clearly describe the problem and solution.

* Be sure you have tested your changes and are confident they will not break other parts of the product.

* See this document's [code styling and conventions section](#code-style-and-conventions) below to be sure your PR fits our code requirements prior to submitting.
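A minimal sketch of the commit-signing setup referenced in the first bullet above, assuming you have already generated a GPG key and added it to your GitHub account (`KEY_ID` is a placeholder for your own key ID):

```bash
# Tell git which key to sign with (KEY_ID is a placeholder, not a real key).
git config --global user.signingkey KEY_ID

# Sign every commit automatically instead of passing -S on each commit.
git config --global commit.gpgsign true

# Confirm a signature is attached to the most recent commit.
git log --show-signature -1
```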
### Code style and conventions

* **Keep code [DRY](https://en.wikipedia.org/wiki/Don%27t_repeat_yourself)**. For example, Bash code used by multiple scripts will likely best be added to <span style="white-space: nowrap;">[`so-common`](salt/common/tools/sbin/so-common)</span>.

* All new Bash code should pass [ShellCheck](https://www.shellcheck.net/) analysis. Where errors can be *safely* [ignored](https://github.com/koalaman/shellcheck/wiki/Ignore), the relevant disable directive should be accompanied by a brief explanation of why the error is being ignored (see the example after this list).

* **Ensure all YAML (this includes Salt states and pillars) is properly formatted**. The spec for YAML v1.2 can be found [here](https://yaml.org/spec/1.2/spec.html); there are also numerous online resources with simpler descriptions of its formatting rules.
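As an example of the ShellCheck guidance above, a disable directive with the required explanation might look like the following sketch (the variable and warning code are illustrative, not taken from this repo):

```bash
#!/bin/bash
# shellcheck disable=SC2034  # so_version is unused here; it is consumed by scripts that source this file.
so_version="2.3.80"
```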
@@ -1,14 +1,14 @@
## Security Onion 2.3.50
## Security Onion 2.3.80

Security Onion 2.3.50 is here!
Security Onion 2.3.80 is here!

## Screenshots

Alerts



Hunt



### Release Notes
SECURITY.md (new file, 21 lines)
@@ -0,0 +1,21 @@
# Security Policy

## Supported Versions

| Version | Supported          |
| ------- | ------------------ |
| 2.x.x   | :white_check_mark: |
| 16.04.x | :x:                |

Security Onion 16.04 has reached End Of Life and is no longer supported.

## Reporting a Vulnerability

If you have any security concerns regarding Security Onion or believe you have uncovered a vulnerability, please follow these steps:

- send an email to security@securityonion.net
- include a description of the issue and steps to reproduce
- please use plain text format (no Word documents or PDF files)
- please do not disclose publicly until we have had sufficient time to resolve the issue

This security address should be used only for undisclosed vulnerabilities. Dealing with fixed issues or general questions on how to use Security Onion should be handled via the normal support channels.
@@ -1,17 +1,18 @@
### 2.3.50 ISO image built on 2021/04/27
### 2.3.80 ISO image built on 2021/09/27

### Download and Verify

2.3.50 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.50.iso
2.3.80 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso

MD5: C39CEA68B5A8AFC5CFFB2481797C0374
SHA1: 00AD9F29ABE3AB495136989E62EBB8FA00DA82C6
SHA256: D77AE370D7863837A989F6735413D1DD46B866D8D135A4C363B0633E3990387E
MD5: 24F38563860416F4A8ABE18746913E14
SHA1: F923C005F54EA2A17AB225ADA0DA46042707AAD9
SHA256: 8E95D10AF664D9A406C168EC421D943CB23F0D0C1813C6C2DBA9B4E131984018

Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.50.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig

Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS

@@ -25,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma

Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.50.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig
```

Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.50.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso
```

Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.50.iso.sig securityonion-2.3.50.iso
gpg --verify securityonion-2.3.80.iso.sig securityonion-2.3.80.iso
```

The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Tue 27 Apr 2021 02:17:25 PM EDT using RSA key ID FE507013
gpg: Signature made Mon 27 Sep 2021 08:55:01 AM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
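The commands above can be combined into a single verification script. This is a hedged sketch based only on the URLs and hashes published above; the working directory and the key-import step are assumptions:

```bash
#!/bin/bash
set -e
cd /tmp

# Download the signing key, the detached signature, and the ISO (URLs documented above).
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.80.iso.sig
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.80.iso

# Import the signing key, then verify the ISO against the detached signature.
gpg --import KEYS
gpg --verify securityonion-2.3.80.iso.sig securityonion-2.3.80.iso

# Independently compute the SHA256 hash and compare it with the value listed above.
sha256sum securityonion-2.3.80.iso
```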
(Two existing binary screenshot images changed; sizes unchanged at 245 KiB and 168 KiB before and after.)

assets/images/verified-commit-1.png (new binary file, 24 KiB; binary content not shown)
@@ -67,3 +67,7 @@ peer:
reactor:
  - 'so/fleet':
    - salt://reactor/fleet.sls
  - 'salt/beacon/*/watch_sqlite_db//opt/so/conf/kratos/db/sqlite.db':
    - salt://reactor/kratos.sls
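The new reactor entry above maps file-watch beacon events for the Kratos SQLite database to `salt://reactor/kratos.sls`. A generic Salt troubleshooting sketch (not a command from this repo) for confirming the beacon actually fires is to watch the event bus on the manager while the file changes:

```bash
# Print Salt events as they arrive; a matching beacon event should carry a tag
# beginning with salt/beacon/<minion-id>/watch_sqlite_db when the Kratos DB changes.
salt-run state.event pretty=True
```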
@@ -1,208 +0,0 @@
{%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%}
{%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%}
{% set WAZUH = salt['pillar.get']('manager:wazuh', '0') %}
{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %}
{% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %}
{% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %}
{% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %}
{% set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %}
{% set GRAFANA = salt['pillar.get']('manager:grafana', '0') %}

eval:
  containers:
    - so-nginx
    - so-telegraf
{% if GRAFANA == '1' %}
    - so-influxdb
    - so-grafana
{% endif %}
    - so-dockerregistry
    - so-soc
    - so-kratos
    - so-idstools
{% if FLEETMANAGER %}
    - so-mysql
    - so-fleet
    - so-redis
{% endif %}
    - so-elasticsearch
    - so-logstash
    - so-kibana
    - so-steno
    - so-suricata
    - so-zeek
    - so-curator
    - so-elastalert
{% if WAZUH != '0' %}
    - so-wazuh
{% endif %}
    - so-soctopus
{% if THEHIVE != '0' %}
    - so-thehive
    - so-thehive-es
    - so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
    - so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
    - so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
    - so-domainstats
{% endif %}
heavy_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-redis
    - so-logstash
    - so-elasticsearch
    - so-curator
    - so-steno
    - so-suricata
    - so-wazuh
    - so-filebeat
{% if ZEEKVER != 'SURICATA' %}
    - so-zeek
{% endif %}
helix:
  containers:
    - so-nginx
    - so-telegraf
    - so-idstools
    - so-steno
    - so-zeek
    - so-redis
    - so-logstash
    - so-filebeat
hot_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-logstash
    - so-elasticsearch
    - so-curator
manager_search:
  containers:
    - so-nginx
    - so-telegraf
    - so-soc
    - so-kratos
    - so-acng
    - so-idstools
    - so-redis
    - so-logstash
    - so-elasticsearch
    - so-curator
    - so-kibana
    - so-elastalert
    - so-filebeat
    - so-soctopus
{% if FLEETMANAGER %}
    - so-mysql
    - so-fleet
    - so-redis
{% endif %}
{% if WAZUH != '0' %}
    - so-wazuh
{% endif %}
    - so-soctopus
{% if THEHIVE != '0' %}
    - so-thehive
    - so-thehive-es
    - so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
    - so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
    - so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
    - so-domainstats
{% endif %}
manager:
  containers:
    - so-dockerregistry
    - so-nginx
    - so-telegraf
{% if GRAFANA == '1' %}
    - so-influxdb
    - so-grafana
{% endif %}
    - so-soc
    - so-kratos
    - so-acng
    - so-idstools
    - so-redis
    - so-elasticsearch
    - so-logstash
    - so-kibana
    - so-elastalert
    - so-filebeat
{% if FLEETMANAGER %}
    - so-mysql
    - so-fleet
    - so-redis
{% endif %}
{% if WAZUH != '0' %}
    - so-wazuh
{% endif %}
    - so-soctopus
{% if THEHIVE != '0' %}
    - so-thehive
    - so-thehive-es
    - so-cortex
{% endif %}
{% if PLAYBOOK != '0' %}
    - so-playbook
{% endif %}
{% if FREQSERVER != '0' %}
    - so-freqserver
{% endif %}
{% if DOMAINSTATS != '0' %}
    - so-domainstats
{% endif %}
parser_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-logstash
search_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-logstash
    - so-elasticsearch
    - so-curator
    - so-filebeat
{% if WAZUH != '0' %}
    - so-wazuh
{% endif %}
sensor:
  containers:
    - so-nginx
    - so-telegraf
    - so-steno
    - so-suricata
{% if ZEEKVER != 'SURICATA' %}
    - so-zeek
{% endif %}
    - so-wazuh
    - so-filebeat
warm_node:
  containers:
    - so-nginx
    - so-telegraf
    - so-elasticsearch
fleet:
  containers:
{% if FLEETNODE %}
    - so-mysql
    - so-fleet
    - so-redis
    - so-filebeat
    - so-nginx
    - so-telegraf
{% endif %}
@@ -1,7 +1,7 @@
elasticsearch:
  templates:
    - so/so-beats-template.json.jinja
    - so/so-common-template.json
    - so/so-common-template.json.jinja
    - so/so-firewall-template.json.jinja
    - so/so-flow-template.json.jinja
    - so/so-ids-template.json.jinja
@@ -10,4 +10,4 @@ elasticsearch:
    - so/so-ossec-template.json.jinja
    - so/so-strelka-template.json.jinja
    - so/so-syslog-template.json.jinja
    - so/so-zeek-template.json.jinja
    - so/so-zeek-template.json.jinja
@@ -1,7 +1,7 @@
elasticsearch:
  templates:
    - so/so-beats-template.json.jinja
    - so/so-common-template.json
    - so/so-common-template.json.jinja
    - so/so-firewall-template.json.jinja
    - so/so-flow-template.json.jinja
    - so/so-ids-template.json.jinja
@@ -1,7 +1,7 @@
elasticsearch:
  templates:
    - so/so-beats-template.json.jinja
    - so/so-common-template.json
    - so/so-common-template.json.jinja
    - so/so-firewall-template.json.jinja
    - so/so-flow-template.json.jinja
    - so/so-ids-template.json.jinja
@@ -7,8 +7,10 @@ logstash:
    - so/9000_output_zeek.conf.jinja
    - so/9002_output_import.conf.jinja
    - so/9034_output_syslog.conf.jinja
    - so/9050_output_filebeatmodules.conf.jinja
    - so/9100_output_osquery.conf.jinja
    - so/9400_output_suricata.conf.jinja
    - so/9500_output_beats.conf.jinja
    - so/9600_output_ossec.conf.jinja
    - so/9700_output_strelka.conf.jinja
    - so/9800_output_logscan.conf.jinja
@@ -22,6 +22,9 @@ base:
  '*_manager or *_managersearch':
    - match: compound
    - data.*
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
    - elasticsearch.auth
{% endif %}
    - secrets
    - global
    - minions.{{ grains.id }}
@@ -38,6 +41,9 @@ base:
    - secrets
    - healthcheck.eval
    - elasticsearch.eval
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
    - elasticsearch.auth
{% endif %}
    - global
    - minions.{{ grains.id }}
@@ -46,6 +52,9 @@ base:
    - logstash.manager
    - logstash.search
    - elasticsearch.search
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
    - elasticsearch.auth
{% endif %}
    - data.*
    - zeeklogs
    - secrets
@@ -59,6 +68,7 @@ base:
  '*_heavynode':
    - zeeklogs
    - elasticsearch.auth
    - global
    - minions.{{ grains.id }}
@@ -80,6 +90,7 @@ base:
    - logstash
    - logstash.search
    - elasticsearch.search
    - elasticsearch.auth
    - global
    - minions.{{ grains.id }}
    - data.nodestab
@@ -88,5 +99,8 @@
    - zeeklogs
    - secrets
    - elasticsearch.eval
{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
    - elasticsearch.auth
{% endif %}
    - global
    - minions.{{ grains.id }}
    - minions.{{ grains.id }}
@@ -52,5 +52,4 @@ zeek:
    - frameworks/signatures/detect-windows-shells
  redef:
    - LogAscii::use_json = T;
    - LogAscii::json_timestamps = JSON::TS_ISO8601;
    - CaptureLoss::watch_interval = 5 mins;
    - CaptureLoss::watch_interval = 5 mins;
@@ -45,7 +45,8 @@
    'schedule',
    'soctopus',
    'tcpreplay',
    'docker_clean'
    'docker_clean',
    'learn'
  ],
  'so-heavynode': [
    'ca',
@@ -108,7 +109,8 @@
    'zeek',
    'schedule',
    'tcpreplay',
    'docker_clean'
    'docker_clean',
    'learn'
  ],
  'so-manager': [
    'salt.master',
@@ -127,7 +129,8 @@
    'utility',
    'schedule',
    'soctopus',
    'docker_clean'
    'docker_clean',
    'learn'
  ],
  'so-managersearch': [
    'salt.master',
@@ -146,7 +149,8 @@
    'utility',
    'schedule',
    'soctopus',
    'docker_clean'
    'docker_clean',
    'learn'
  ],
  'so-node': [
    'ca',
@@ -178,7 +182,8 @@
    'schedule',
    'soctopus',
    'tcpreplay',
    'docker_clean'
    'docker_clean',
    'learn'
  ],
  'so-sensor': [
    'ca',
@@ -237,7 +242,7 @@
  {% do allowed_states.append('kibana') %}
{% endif %}

{% if CURATOR and grains.role in ['so-eval', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode'] %}
{% if grains.role in ['so-eval', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
  {% do allowed_states.append('curator') %}
{% endif %}

@@ -296,4 +301,4 @@
{% endif %}

{# all nodes can always run salt.minion state #}
{% do allowed_states.append('salt.minion') %}
{% do allowed_states.append('salt.minion') %}
@@ -22,6 +22,7 @@
/opt/so/log/salt/so-salt-minion-check
/opt/so/log/salt/minion
/opt/so/log/salt/master
/opt/so/log/logscan/*.log
{
{{ logrotate_conf | indent(width=4) }}
}
@@ -2,6 +2,7 @@
{% if sls in allowed_states %}

{% set role = grains.id.split('_') | last %}
{% from 'elasticsearch/auth.map.jinja' import ELASTICAUTH with context %}

# Remove variables.txt from /tmp - This is temp
rmvariablesfile:
@@ -95,7 +96,6 @@ commonpkgs:
      - netcat
      - python3-mysqldb
      - sqlite3
      - argon2
      - libssl-dev
      - python3-dateutil
      - python3-m2crypto
@@ -128,7 +128,6 @@ commonpkgs:
      - net-tools
      - curl
      - sqlite
      - argon2
      - mariadb-devel
      - nmap-ncat
      - python3
@@ -169,6 +168,14 @@ alwaysupdated:
Etc/UTC:
  timezone.system

elastic_curl_config:
  file.managed:
    - name: /opt/so/conf/elasticsearch/curl.config
    - source: salt://elasticsearch/curl.config
    - mode: 600
    - show_changes: False
    - makedirs: True

# Sync some Utilities
utilsyncscripts:
  file.recurse:
@@ -178,6 +185,10 @@ utilsyncscripts:
    - file_mode: 755
    - template: jinja
    - source: salt://common/tools/sbin
    - defaults:
        ELASTICCURL: 'curl'
    - context:
        ELASTICCURL: {{ ELASTICAUTH.elasticcurl }}

{% if role in ['eval', 'standalone', 'sensor', 'heavynode'] %}
# Add sensor cleanup
@@ -315,6 +326,16 @@ dockerreserveports:
    - name: /etc/sysctl.d/99-reserved-ports.conf

{% if salt['grains.get']('sosmodel', '') %}
{% if grains['os'] == 'CentOS' %}
# Install Raid tools
raidpkgs:
  pkg.installed:
    - skip_suggestions: True
    - pkgs:
      - securityonion-raidtools
      - securityonion-megactl
{% endif %}

# Install raid check cron
/usr/sbin/so-raid-status > /dev/null 2>&1:
  cron.present:
@@ -17,4 +17,4 @@

. /usr/sbin/so-common

salt-call state.highstate
salt-call state.highstate -l info
@@ -15,6 +15,8 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

DEFAULT_SALT_DIR=/opt/so/saltstack/default

# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run using sudo!"
@@ -86,19 +88,6 @@ add_interface_bond0() {
  fi
}

check_airgap() {
  # See if this is an airgap install
  AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap: | awk '{print $2}')
  if [[ "$AIRGAP" == "True" ]]; then
    is_airgap=0
    UPDATE_DIR=/tmp/soagupdate/SecurityOnion
    AGDOCKER=/tmp/soagupdate/docker
    AGREPO=/tmp/soagupdate/Packages
  else
    is_airgap=1
  fi
}

check_container() {
  docker ps | grep "$1:" > /dev/null 2>&1
  return $?
@@ -110,6 +99,15 @@ check_password() {
  return $?
}

check_password_and_exit() {
  local password=$1
  if ! check_password "$password"; then
    echo "Password is invalid. Do not include single quotes, double quotes, dollar signs, and backslashes in the password."
    exit 2
  fi
  return 0
}

check_elastic_license() {

  [ -n "$TESTING" ] && return
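The new `check_password_and_exit` helper above wraps the existing `check_password` function and aborts with exit code 2 on invalid input. A hedged usage sketch for a script that sources `so-common` (the prompt text is illustrative, not from this repo):

```bash
#!/bin/bash
. /usr/sbin/so-common

# Read a password without echoing it, then validate it; the helper exits with
# code 2 if it contains single quotes, double quotes, dollar signs, or backslashes.
read -r -s -p "Enter a password for the new account: " new_pass
echo
check_password_and_exit "$new_pass"
echo "Password accepted."
```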
@@ -122,6 +120,16 @@ check_elastic_license() {
  fi
}

copy_new_files() {
  # Copy new files over to the salt dir
  cd $UPDATE_DIR
  rsync -a salt $DEFAULT_SALT_DIR/
  rsync -a pillar $DEFAULT_SALT_DIR/
  chown -R socore:socore $DEFAULT_SALT_DIR/
  chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
  cd /tmp
}

disable_fastestmirror() {
  sed -i 's/enabled=1/enabled=0/' /etc/yum/pluginconf.d/fastestmirror.conf
}
@@ -141,16 +149,16 @@ Do you agree to the terms of the Elastic License?
If so, type AGREE to accept the Elastic License and continue. Otherwise, press Enter to exit this program without making any changes.
EOM

  AGREED=$(whiptail --title "Security Onion Setup" --inputbox \
    "$message" 20 75 3>&1 1>&2 2>&3)
  AGREED=$(whiptail --title "$whiptail_title" --inputbox \
    "$message" 20 75 3>&1 1>&2 2>&3)

  if [ "${AGREED^^}" = 'AGREE' ]; then
    mkdir -p /opt/so/state
    touch /opt/so/state/yeselastic.txt
  else
    echo "Starting in 2.3.40 you must accept the Elastic license if you want to run Security Onion."
    exit 1
  fi
  if [ "${AGREED^^}" = 'AGREE' ]; then
    mkdir -p /opt/so/state
    touch /opt/so/state/yeselastic.txt
  else
    echo "Starting in 2.3.40 you must accept the Elastic license if you want to run Security Onion."
    exit 1
  fi

}
@@ -240,6 +248,7 @@ lookup_salt_value() {
  key=$1
  group=$2
  kind=$3
  output=${4:-newline_values_only}

  if [ -z "$kind" ]; then
    kind=pillar
@@ -249,7 +258,7 @@
    group=${group}:
  fi

  salt-call --no-color ${kind}.get ${group}${key} --out=newline_values_only
  salt-call --no-color ${kind}.get ${group}${key} --out=${output}
}

lookup_pillar() {
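The change to `lookup_salt_value` above adds an optional fourth argument for the salt-call output format, defaulting to the previous `newline_values_only` behaviour. A usage sketch, assuming the helper is called as shown in this hunk:

```bash
# Default behaviour: pillar lookup with plain newline-separated values.
lookup_salt_value mdengine global pillar

# Same lookup, but request JSON output so the result can be piped into jq.
lookup_salt_value mdengine global pillar json | jq .
```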
@@ -277,7 +286,7 @@ lookup_role() {

require_manager() {
  if is_manager_node; then
    echo "This is a manager, We can proceed."
    echo "This is a manager, so we can proceed."
  else
    echo "Please run this command on the manager; the manager controls the grid."
    exit 1
@@ -290,6 +299,7 @@ retry() {
  cmd=$3
  expectedOutput=$4
  attempt=0
  local exitcode=0
  while [[ $attempt -lt $maxAttempts ]]; do
    attempt=$((attempt+1))
    echo "Executing command with retry support: $cmd"
@@ -309,7 +319,29 @@ retry() {
    sleep $sleepDelay
  done
  echo "Command continues to fail; giving up."
  return 1
  return $exitcode
}

run_check_net_err() {
  local cmd=$1
  local err_msg=${2:-"Unknown error occured, please check /root/$WHATWOULDYOUSAYYAHDOHERE.log for details."} # Really need to rename that variable
  local no_retry=$3

  local exit_code
  if [[ -z $no_retry ]]; then
    retry 5 60 "$cmd"
    exit_code=$?
  else
    eval "$cmd"
    exit_code=$?
  fi

  if [[ $exit_code -ne 0 ]]; then
    ERR_HANDLED=true
    [[ -z $no_retry ]] || echo "Command failed with error $exit_code"
    echo "$err_msg"
    exit $exit_code
  fi
}

set_os() {
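The new `run_check_net_err` helper above either retries a command (five attempts, 60 seconds apart, via the existing `retry` function) or runs it once when a third argument is given, and exits the calling script with the command's exit code on failure. A hedged usage sketch; the URL and messages are placeholders:

```bash
# Retry a flaky download up to five times before giving up.
run_check_net_err "curl -fsSL -o /tmp/agents.tar.gz https://example.com/agents.tar.gz" \
  "Unable to download the agents archive; check network connectivity."

# Run a command once, with no retries, by passing any non-empty third argument.
run_check_net_err "systemctl restart salt-minion" "Failed to restart salt-minion." no_retry
```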
@@ -349,6 +381,14 @@ set_version() {
  fi
}

has_uppercase() {
  local string=$1

  echo "$string" | grep -qP '[A-Z]' \
    && return 0 \
    || return 1
}

valid_cidr() {
  # Verify there is a backslash in the string
  echo "$1" | grep -qP "^[^/]+/[^/]+$" || return 1
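A short sketch of how the two validators added above might be used by an interactive setup prompt; the prompt text and messages are illustrative, not from this repo:

```bash
read -r -p "Enter the home network range (CIDR): " homenet
if ! valid_cidr "$homenet"; then
  echo "'$homenet' is not a valid CIDR block (expected something like 192.168.0.0/16)."
  exit 1
fi

read -r -p "Enter a username: " new_user
if has_uppercase "$new_user"; then
  echo "Usernames should be all lowercase."
  exit 1
fi
```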
@@ -474,12 +514,14 @@ wait_for_web_response() {
  url=$1
  expected=$2
  maxAttempts=${3:-300}
  curlcmd=${4:-curl}
  logfile=/root/wait_for_web_response.log
  truncate -s 0 "$logfile"
  attempt=0
  while [[ $attempt -lt $maxAttempts ]]; do
    attempt=$((attempt+1))
    echo "Waiting for value '$expected' at '$url' ($attempt/$maxAttempts)"
    result=$(curl -ks -L $url)
    result=$($curlcmd -ks -L $url)
    exitcode=$?

    echo "--------------------------------------------------" >> $logfile
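The new fourth parameter above lets callers substitute their own curl invocation (for example an authenticated one) while keeping plain `curl` as the default. A hedged usage sketch; the URLs, expected values, and credentials are placeholders:

```bash
# Default: poll with plain curl, up to 300 attempts.
wait_for_web_response "http://localhost:5601/api/status" "available"

# Reduced attempt count and an authenticated curl command passed as the fourth argument.
wait_for_web_response "https://localhost:9200/_cluster/health" "green" 60 "curl -u elastic:changeme"
```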
@@ -35,6 +35,7 @@ if [ ! -f $BACKUPFILE ]; then
  {%- endfor %}
  tar -rf $BACKUPFILE /etc/pki
  tar -rf $BACKUPFILE /etc/salt
  tar -rf $BACKUPFILE /opt/so/conf/kratos

fi
@@ -32,19 +32,25 @@ def get_image_version(string) -> str:
    ver = string.split(':')[-1]
    if ver == 'latest':
        # Version doesn't like "latest", so use a high semver
        return '999999.9.9'
        return '99999.9.9'
    else:
        try:
            Version(ver)
        except InvalidVersion:
            # Strip the last substring following a hyphen for automated branches
            ver = '-'.join(ver.split('-')[:-1])
            # Also return a very high semver for any version
            # with a dash in it since it will likely be a dev version of some kind
            if '-' in ver:
                return '999999.9.9'
        return ver


def main(quiet):
    client = docker.from_env()

    # Prune old/stopped containers
    if not quiet: print('Pruning old containers')
    client.containers.prune()

    image_list = client.images.list(filters={ 'dangling': False })

    # Map list of image objects to flattened list of tags (format: "name:version")
@@ -72,9 +78,16 @@ def main(quiet):
            for group in grouped_t_list[2:]:
                for tag in group:
                    if not quiet: print(f'Removing image {tag}')
                    client.images.remove(tag)
        except InvalidVersion as e:
            print(f'so-{get_so_image_basename(t_list[0])}: {e.args[0]}', file=sys.stderr)
                    try:
                        client.images.remove(tag, force=True)
                    except docker.errors.ClientError as e:
                        print(f'Could not remove image {tag}, continuing...')
        except (docker.errors.APIError, InvalidVersion) as e:
            print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
            exit(1)
        except Exception as e:
            print('Unhandled exception occurred:')
            print(f'so-{get_so_image_basename(t_list[0])}: {e}', file=sys.stderr)
            exit(1)

    if no_prunable and not quiet:
@@ -86,4 +99,4 @@ if __name__ == "__main__":
    main_parser.add_argument('-q', '--quiet', action='store_const', const=True, required=False)
    args = main_parser.parse_args(sys.argv[1:])

    main(args.quiet)
    main(args.quiet)
@@ -145,9 +145,9 @@ EOF
rulename=$(echo ${raw_rulename,,} | sed 's/ /_/g')

cat << EOF >> "$rulename.yaml"
# Elasticsearch Host
es_host: elasticsearch
es_port: 9200
# Elasticsearch Host Override (optional)
# es_host: elasticsearch
# es_port: 9200

# (Required)
# Rule name, must be unique
67
salt/common/tools/sbin/so-elastic-auth
Executable file
67
salt/common/tools/sbin/so-elastic-auth
Executable file
@@ -0,0 +1,67 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
if [ -f "/usr/sbin/so-common" ]; then
|
||||
. /usr/sbin/so-common
|
||||
fi
|
||||
|
||||
ES_AUTH_PILLAR=${ELASTIC_AUTH_PILLAR:-/opt/so/saltstack/local/pillar/elasticsearch/auth.sls}
|
||||
ES_USERS_FILE=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
|
||||
|
||||
authEnable=$1
|
||||
|
||||
if ! grep -q "enabled: " "$ES_AUTH_PILLAR"; then
|
||||
echo "Elastic auth pillar file is invalid. Unable to proceed."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
function restart() {
|
||||
if [[ -z "$ELASTIC_AUTH_SKIP_HIGHSTATE" ]]; then
|
||||
echo "Elasticsearch on all affected minions will now be stopped and then restarted..."
|
||||
salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-node or G@role:so-heavynode' cmd.run so-elastic-stop queue=True
|
||||
echo "Applying highstate to all affected minions..."
|
||||
salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-node or G@role:so-heavynode' state.highstate queue=True
|
||||
fi
|
||||
}
|
||||
|
||||
if [[ "$authEnable" == "true" ]]; then
|
||||
if grep -q "enabled: False" "$ES_AUTH_PILLAR"; then
|
||||
sed -i 's/enabled: False/enabled: True/g' "$ES_AUTH_PILLAR"
|
||||
restart
|
||||
echo "Elastic auth is now enabled."
|
||||
if grep -q "argon" "$ES_USERS_FILE"; then
|
||||
echo ""
|
||||
echo "IMPORTANT: The following users will need to change their password, after logging into SOC, in order to access Kibana:"
|
||||
grep argon "$ES_USERS_FILE" | cut -d ":" -f 1
|
||||
fi
|
||||
else
|
||||
echo "Auth is already enabled."
|
||||
fi
|
||||
elif [[ "$authEnable" == "false" ]]; then
|
||||
if grep -q "enabled: True" "$ES_AUTH_PILLAR"; then
|
||||
sed -i 's/enabled: True/enabled: False/g' "$ES_AUTH_PILLAR"
|
||||
restart
|
||||
echo "Elastic auth is now disabled."
|
||||
else
|
||||
echo "Auth is already disabled."
|
||||
fi
|
||||
else
|
||||
echo "Usage: $0 <true|false>"
|
||||
echo ""
|
||||
echo "Toggles Elastic authentication. Elasticsearch will be restarted on each affected minion."
|
||||
echo ""
|
||||
fi
|
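A minimal usage sketch for this new toggle script, assuming it is installed on the manager's PATH like the other so-* tools (not confirmed by this diff):

    # enable Elastic authentication (restarts Elasticsearch on affected minions)
    sudo so-elastic-auth true

    # disable it again
    sudo so-elastic-auth false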
||||
@@ -50,7 +50,7 @@ done
|
||||
if [ $SKIP -ne 1 ]; then
|
||||
# List indices
|
||||
echo
|
||||
curl -k -L https://{{ NODEIP }}:9200/_cat/indices?v
|
||||
{{ ELASTICCURL }} -k -L https://{{ NODEIP }}:9200/_cat/indices?v
|
||||
echo
|
||||
# Inform user we are about to delete all data
|
||||
echo
|
||||
@@ -89,10 +89,10 @@ fi
|
||||
# Delete data
|
||||
echo "Deleting data..."
|
||||
|
||||
INDXS=$(curl -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
|
||||
INDXS=$({{ ELASTICCURL }} -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
|
||||
for INDX in ${INDXS}
|
||||
do
|
||||
curl -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
|
||||
{{ ELASTICCURL }} -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
|
||||
done
|
||||
|
||||
#Start Logstash/Filebeat
|
||||
|
||||
@@ -18,4 +18,4 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_cat/indices?pretty
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_cat/indices?pretty
|
||||
|
||||
@@ -21,5 +21,5 @@ THEHIVEESPORT=9400
|
||||
|
||||
echo "Removing read only attributes for indices..."
|
||||
echo
|
||||
curl -s -k -XPUT -H "Content-Type: application/json" -L https://$IP:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
curl -XPUT -H "Content-Type: application/json" -L http://$IP:9400/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
{{ ELASTICCURL }} -s -k -XPUT -H "Content-Type: application/json" -L https://$IP:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
{{ ELASTICCURL }} -XPUT -H "Content-Type: application/json" -L http://$IP:9400/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was an issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [ "$1" == "" ]; then
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
|
||||
else
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
|
||||
fi
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [ "$1" == "" ]; then
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq .
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq .
|
||||
else
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq .
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq .[]
|
||||
fi
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
. /usr/sbin/so-common
|
||||
if [ "$1" == "" ]; then
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
|
||||
else
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
|
||||
fi
|
||||
|
||||
37
salt/common/tools/sbin/so-elasticsearch-query
Executable file
@@ -0,0 +1,37 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [[ $# -lt 1 ]]; then
|
||||
echo "Submit a cURL request to the local Security Onion Elasticsearch host."
|
||||
echo ""
|
||||
echo "Usage: $0 <PATH> [ARGS,...]"
|
||||
echo ""
|
||||
echo "Where "
|
||||
echo " PATH represents the elastic function being requested."
|
||||
echo " ARGS is used to specify additional, optional curl parameters."
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 /"
|
||||
echo " $0 '*:so-*/_search' -d '{\"query\": {\"match_all\": {}},\"size\": 1}' | jq"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
QUERYPATH=$1
|
||||
shift
|
||||
|
||||
{{ ELASTICCURL }} -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${QUERYPATH}" "$@"
|
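Beyond the examples printed by the script itself, a hedged illustration of typical calls once the template is rendered and installed (the endpoint and index pattern below are only examples):

    # simple GET against a cluster endpoint
    sudo so-elasticsearch-query _cluster/health | jq

    # pass extra curl arguments straight through
    sudo so-elasticsearch-query 'so-zeek-*/_search' -d '{"query":{"match_all":{}},"size":1}' | jq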
||||
57
salt/common/tools/sbin/so-elasticsearch-roles-load
Normal file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
{%- set mainint = salt['pillar.get']('host:mainint') %}
|
||||
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
|
||||
|
||||
default_conf_dir=/opt/so/conf
|
||||
ELASTICSEARCH_HOST="{{ MYIP }}"
|
||||
ELASTICSEARCH_PORT=9200
|
||||
|
||||
# Define a default directory to load roles from
|
||||
ELASTICSEARCH_ROLES="$default_conf_dir/elasticsearch/roles/"
|
||||
|
||||
# Wait for ElasticSearch to initialize
|
||||
echo -n "Waiting for ElasticSearch..."
|
||||
COUNT=0
|
||||
ELASTICSEARCH_CONNECTED="no"
|
||||
while [[ "$COUNT" -le 240 ]]; do
|
||||
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
if [ $? -eq 0 ]; then
|
||||
ELASTICSEARCH_CONNECTED="yes"
|
||||
echo "connected!"
|
||||
break
|
||||
else
|
||||
((COUNT+=1))
|
||||
sleep 1
|
||||
echo -n "."
|
||||
fi
|
||||
done
|
||||
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
|
||||
echo
|
||||
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
|
||||
echo
|
||||
fi
|
||||
|
||||
cd ${ELASTICSEARCH_ROLES}
|
||||
|
||||
echo "Loading templates..."
|
||||
for role in *; do
|
||||
name=$(echo "$role" | cut -d. -f1)
|
||||
so-elasticsearch-query _security/role/$name -XPUT -d @"$role"
|
||||
done
|
||||
|
||||
cd - >/dev/null
|
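The loop above amounts to one PUT per role file via so-elasticsearch-query; a hand-run equivalent for a single, hypothetical role file might look like:

    cd /opt/so/conf/elasticsearch/roles
    # loads example-role.json as the Elasticsearch role "example-role"
    sudo so-elasticsearch-query _security/role/example-role -XPUT -d @example-role.json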
||||
@@ -18,4 +18,4 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_cat/shards?pretty
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_cat/shards?pretty
|
||||
|
||||
@@ -18,4 +18,4 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
curl -s -k -L -XDELETE https://{{ NODEIP }}:9200/_template/$1
|
||||
{{ ELASTICCURL }} -s -k -L -XDELETE https://{{ NODEIP }}:9200/_template/$1
|
||||
|
||||
@@ -19,7 +19,7 @@
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [ "$1" == "" ]; then
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq .
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_template/* | jq .
|
||||
else
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq .
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq .
|
||||
fi
|
||||
|
||||
@@ -17,7 +17,7 @@
|
||||
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
. /usr/sbin/so-common
|
||||
if [ "$1" == "" ]; then
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys'
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys'
|
||||
else
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq
|
||||
{{ ELASTICCURL }} -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq
|
||||
fi
|
||||
|
||||
@@ -1,8 +1,5 @@
|
||||
{%- set mainint = salt['pillar.get']('host:mainint') %}
|
||||
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
|
||||
|
||||
#!/bin/bash
|
||||
# Copyright 2014,2015,2016,2017,2018,2019 Security Onion Solutions, LLC
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@@ -17,6 +14,9 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
{%- set mainint = salt['pillar.get']('host:mainint') %}
|
||||
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
|
||||
|
||||
default_conf_dir=/opt/so/conf
|
||||
ELASTICSEARCH_HOST="{{ MYIP }}"
|
||||
ELASTICSEARCH_PORT=9200
|
||||
@@ -30,7 +30,7 @@ echo -n "Waiting for ElasticSearch..."
|
||||
COUNT=0
|
||||
ELASTICSEARCH_CONNECTED="no"
|
||||
while [[ "$COUNT" -le 240 ]]; do
|
||||
curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
if [ $? -eq 0 ]; then
|
||||
ELASTICSEARCH_CONNECTED="yes"
|
||||
echo "connected!"
|
||||
@@ -51,7 +51,7 @@ cd ${ELASTICSEARCH_TEMPLATES}
|
||||
|
||||
|
||||
echo "Loading templates..."
|
||||
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
|
||||
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; {{ ELASTICCURL }} -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
|
||||
echo
|
||||
|
||||
cd - >/dev/null
|
||||
|
||||
5
salt/common/tools/sbin/so-elasticsearch-wait
Executable file
@@ -0,0 +1,5 @@
#!/bin/bash

. /usr/sbin/so-common

wait_for_web_response "https://localhost:9200/_cat/indices/.kibana*" "green open" 300 "{{ ELASTICCURL }}"
|
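A rough manual equivalent of that readiness check, assuming ELASTICCURL renders to a curl command carrying any required credentials:

    curl -s -k "https://localhost:9200/_cat/indices/.kibana*" | grep "green open"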
||||
67
salt/common/tools/sbin/so-filebeat-module-setup
Executable file
@@ -0,0 +1,67 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
{%- set mainint = salt['pillar.get']('host:mainint') %}
|
||||
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}
|
||||
|
||||
default_conf_dir=/opt/so/conf
|
||||
ELASTICSEARCH_HOST="{{ MYIP }}"
|
||||
ELASTICSEARCH_PORT=9200
|
||||
#ELASTICSEARCH_AUTH=""
|
||||
|
||||
# Define a default directory to load pipelines from
|
||||
FB_MODULE_YML="/usr/share/filebeat/module-setup.yml"
|
||||
|
||||
|
||||
# Wait for ElasticSearch to initialize
|
||||
echo -n "Waiting for ElasticSearch..."
|
||||
COUNT=0
|
||||
ELASTICSEARCH_CONNECTED="no"
|
||||
while [[ "$COUNT" -le 240 ]]; do
|
||||
{{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
if [ $? -eq 0 ]; then
|
||||
ELASTICSEARCH_CONNECTED="yes"
|
||||
echo "connected!"
|
||||
break
|
||||
else
|
||||
((COUNT+=1))
|
||||
sleep 1
|
||||
echo -n "."
|
||||
fi
|
||||
done
|
||||
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
|
||||
echo
|
||||
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
|
||||
echo
|
||||
fi
|
||||
echo "Testing to see if the pipelines are already applied"
|
||||
ESVER=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" |jq .version.number |tr -d \")
|
||||
PIPELINES=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"/_ingest/pipeline/filebeat-$ESVER-suricata-eve-pipeline | jq . | wc -c)
|
||||
|
||||
if [[ "$PIPELINES" -lt 5 ]]; then
|
||||
echo "Setting up ingest pipeline(s)"
|
||||
|
||||
for MODULE in activemq apache auditd aws azure barracuda bluecoat cef checkpoint cisco coredns crowdstrike cyberark cylance elasticsearch envoyproxy f5 fortinet gcp google_workspace googlecloud gsuite haproxy ibmmq icinga iis imperva infoblox iptables juniper kafka kibana logstash microsoft misp mongodb mssql mysql nats netscout nginx o365 okta osquery panw postgresql rabbitmq radware redis santa snort snyk sonicwall sophos squid suricata system tomcat traefik zeek zscaler
|
||||
do
|
||||
echo "Loading $MODULE"
|
||||
docker exec -i so-filebeat filebeat setup modules -pipelines -modules $MODULE -c $FB_MODULE_YML
|
||||
sleep 2
|
||||
done
|
||||
else
|
||||
exit 0
|
||||
fi
|
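If a single module ever needs to be (re)loaded by hand, the per-module call inside the loop above can be run directly; suricata is used here purely as an example:

    docker exec -i so-filebeat filebeat setup modules -pipelines -modules suricata -c /usr/share/filebeat/module-setup.yml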
||||
|
||||
|
||||
@@ -35,6 +35,7 @@ def showUsage(options, args):
|
||||
print('')
|
||||
print(' General commands:')
|
||||
print(' help - Prints this usage information.')
|
||||
print(' apply - Apply the firewall state.')
|
||||
print('')
|
||||
print(' Host commands:')
|
||||
print(' listhostgroups - Lists the known host groups.')
|
||||
@@ -66,7 +67,7 @@ def checkDefaultPortsOption(options):
|
||||
|
||||
def checkApplyOption(options):
|
||||
if "--apply" in options:
|
||||
return apply()
|
||||
return apply(None, None)
|
||||
|
||||
def loadYaml(filename):
|
||||
file = open(filename, "r")
|
||||
@@ -328,7 +329,7 @@ def removehost(options, args):
|
||||
code = checkApplyOption(options)
|
||||
return code
|
||||
|
||||
def apply():
|
||||
def apply(options, args):
|
||||
proc = subprocess.run(['salt-call', 'state.apply', 'firewall', 'queue=True'])
|
||||
return proc.returncode
|
||||
|
||||
@@ -356,7 +357,8 @@ def main():
|
||||
"addport": addport,
|
||||
"removeport": removeport,
|
||||
"addhostgroup": addhostgroup,
|
||||
"addportgroup": addportgroup
|
||||
"addportgroup": addportgroup,
|
||||
"apply": apply
|
||||
}
|
||||
|
||||
code=1
|
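With the new "apply" mapping above, re-applying the firewall state should reduce to a single subcommand (a sketch based on the usage text added in this diff):

    sudo so-firewall apply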
||||
|
||||
@@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then
|
||||
fi
|
||||
read -rs FLEET_PASS
|
||||
|
||||
if ! check_password "$FLEET_PASS"; then
|
||||
echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
|
||||
exit 2
|
||||
fi
|
||||
check_password_and_exit "$FLEET_PASS"
|
||||
|
||||
FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
|
||||
if [[ $? -ne 0 ]]; then
|
||||
|
||||
75
salt/common/tools/sbin/so-fleet-user-update
Executable file
@@ -0,0 +1,75 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 <user-name>"
|
||||
echo ""
|
||||
echo "Update password for an existing Fleet user. The new password will be read from STDIN."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
USER=$1
|
||||
|
||||
MYSQL_PASS=$(lookup_pillar_secret mysql)
|
||||
FLEET_IP=$(lookup_pillar fleet_ip)
|
||||
FLEET_USER=$USER
|
||||
|
||||
# test existence of user
|
||||
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
|
||||
"SELECT count(1) FROM users WHERE username='$FLEET_USER'" 2>/dev/null | tail -1)
|
||||
if [[ $? -ne 0 ]] || [[ $MYSQL_OUTPUT -ne 1 ]] ; then
|
||||
echo "Test for username [${FLEET_USER}] failed"
|
||||
echo " expect 1 hit in users database, return $MYSQL_OUTPUT hit(s)."
|
||||
echo "Unable to update Fleet user password."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Read password for new user from stdin
|
||||
test -t 0
|
||||
if [[ $? == 0 ]]; then
|
||||
echo "Enter new password:"
|
||||
fi
|
||||
read -rs FLEET_PASS
|
||||
|
||||
if ! check_password "$FLEET_PASS"; then
|
||||
echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Failed to generate Fleet password hash"
|
||||
exit 2
|
||||
fi
|
||||
|
||||
|
||||
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
|
||||
"UPDATE users SET password='$FLEET_HASH', salt='' where username='$FLEET_USER'" 2>&1)
|
||||
|
||||
if [[ $? -eq 0 ]]; then
|
||||
echo "Successfully updated Fleet user password"
|
||||
else
|
||||
echo "Unable to update Fleet user password"
|
||||
echo "$MYSQL_OUTPUT"
|
||||
exit 2
|
||||
fi
|
||||
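A hedged usage sketch for the new script; the username is hypothetical and the new password is read from standard input (interactively, or piped as below):

    echo 'NewFleetPassw0rd' | sudo so-fleet-user-update analyst1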
17
salt/common/tools/sbin/so-grafana-dashboard-folder-delete
Executable file
@@ -0,0 +1,17 @@
|
||||
# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70
|
||||
|
||||
folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
|
||||
delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")
|
||||
|
||||
for row in $folders; do
|
||||
title=$(echo ${row} | base64 --decode | jq -r '.title')
|
||||
uid=$(echo ${row} | base64 --decode | jq -r '.uid')
|
||||
|
||||
if [[ " ${delfolder[@]} " =~ " ${title} " ]]; then
|
||||
curl -X DELETE http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders/$uid
|
||||
fi
|
||||
done
|
||||
|
||||
echo "so-grafana-dashboard-folder-delete has been run to delete default Grafana dashboard folders that existed prior to 2.3.70" > /opt/so/state/so-grafana-dashboard-folder-delete-complete
|
||||
|
||||
exit 0
|
||||
@@ -17,7 +17,9 @@
|
||||
|
||||
# NOTE: This script depends on so-common
|
||||
IMAGEREPO=security-onion-solutions
|
||||
STATUS_CONF='/opt/so/conf/so-status/so-status.conf'
|
||||
|
||||
# shellcheck disable=SC2120
|
||||
container_list() {
|
||||
MANAGERCHECK=$1
|
||||
|
||||
@@ -128,13 +130,18 @@ update_docker_containers() {
|
||||
mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1
|
||||
|
||||
# Let's make sure we have the public key
|
||||
retry 50 10 "curl -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -o $SIGNPATH/KEYS" >> "$LOG_FILE" 2>&1
|
||||
run_check_net_err \
|
||||
"curl --retry 5 --retry-delay 60 -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS -o $SIGNPATH/KEYS" \
|
||||
"Could not pull signature key file, please ensure connectivity to https://raw.gihubusercontent.com" \
|
||||
noretry >> "$LOG_FILE" 2>&1
|
||||
result=$?
|
||||
if [[ $result -eq 0 ]]; then
|
||||
cat $SIGNPATH/KEYS | gpg --import - >> "$LOG_FILE" 2>&1
|
||||
else
|
||||
echo "Failed to pull signature key file: $result"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# If downloading for soup, check if any optional images need to be pulled
|
||||
if [[ $CURLTYPE == 'soup' ]]; then
|
||||
grep -q "so-logscan" "$STATUS_CONF" && TRUSTED_CONTAINERS+=("so-logscan")
|
||||
fi
|
||||
|
||||
# Download the containers from the interwebs
|
||||
@@ -148,14 +155,15 @@ update_docker_containers() {
|
||||
|
||||
# Pull down the trusted docker image
|
||||
local image=$i:$VERSION$IMAGE_TAG_SUFFIX
|
||||
retry 50 10 "docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" >> "$LOG_FILE" 2>&1
|
||||
run_check_net_err \
|
||||
"docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" \
|
||||
"Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1
|
||||
|
||||
# Get signature
|
||||
retry 50 10 "curl -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" >> "$LOG_FILE" 2>&1
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Unable to pull signature file for $image" >> "$LOG_FILE" 2>&1
|
||||
exit 1
|
||||
fi
|
||||
run_check_net_err \
|
||||
"curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig" \
|
||||
"Could not pull signature file for $image, please ensure connectivity to https://sigs.securityonion.net " \
|
||||
noretry >> "$LOG_FILE" 2>&1
|
||||
# Dump our hash values
|
||||
DOCKERINSPECT=$(docker inspect $CONTAINER_REGISTRY/$IMAGEREPO/$image)
|
||||
|
||||
|
||||
58
salt/common/tools/sbin/so-image-pull
Executable file
@@ -0,0 +1,58 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
. /usr/sbin/so-image-common
|
||||
|
||||
usage() {
|
||||
read -r -d '' message <<- EOM
|
||||
usage: so-image-pull [-h] IMAGE [IMAGE ...]
|
||||
|
||||
positional arguments:
|
||||
IMAGE One or more 'so-' prefixed images to download and verify.
|
||||
|
||||
optional arguments:
|
||||
-h, --help Show this help message and exit.
|
||||
EOM
|
||||
echo "$message"
|
||||
exit 1
|
||||
}
|
||||
|
||||
for arg; do
|
||||
shift
|
||||
[[ "$arg" = "--quiet" || "$arg" = "-q" ]] && quiet=true && continue
|
||||
set -- "$@" "$arg"
|
||||
done
|
||||
|
||||
if [[ $# -eq 0 || $# -gt 1 ]] || [[ $1 == '-h' || $1 == '--help' ]]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
TRUSTED_CONTAINERS=("$@")
|
||||
set_version
|
||||
|
||||
for image in "${TRUSTED_CONTAINERS[@]}"; do
|
||||
if ! docker images | grep "$image" | grep ":5000" | grep -q "$VERSION"; then
|
||||
if [[ $quiet == true ]]; then
|
||||
update_docker_containers "$image" "" "" "/dev/null"
|
||||
else
|
||||
update_docker_containers "$image" "" "" ""
|
||||
fi
|
||||
else
|
||||
echo "$image:$VERSION image exists."
|
||||
fi
|
||||
done
|
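Example invocations of the new wrapper (the image name is illustrative; any 'so-' prefixed image known to the deployment should work):

    # download and verify a single image for the installed version
    sudo so-image-pull so-logscan

    # same, with progress output suppressed
    sudo so-image-pull --quiet so-logscan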
||||
172
salt/common/tools/sbin/so-import-evtx
Normal file
@@ -0,0 +1,172 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
{%- set MANAGER = salt['grains.get']('master') %}
|
||||
{%- set VERSION = salt['pillar.get']('global:soversion') %}
|
||||
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%}
|
||||
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
|
||||
{% set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
|
||||
{% set ES_PW = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}
|
||||
|
||||
INDEX_DATE=$(date +'%Y.%m.%d')
|
||||
RUNID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1)
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
function usage {
|
||||
cat << EOF
|
||||
Usage: $0 <evtx-file-1> [evtx-file-2] [evtx-file-*]
|
||||
|
||||
Imports one or more evtx files into Security Onion. The evtx files will be analyzed and made available for review in the Security Onion toolset.
|
||||
EOF
|
||||
}
|
||||
|
||||
|
||||
function evtx2es() {
|
||||
EVTX=$1
|
||||
HASH=$2
|
||||
|
||||
docker run --rm \
|
||||
-v "$EVTX:/tmp/$RUNID.evtx" \
|
||||
--entrypoint evtx2es \
|
||||
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \
|
||||
--host {{ MANAGERIP }} --scheme https \
|
||||
--index so-beats-$INDEX_DATE --pipeline import.wel \
|
||||
--login {{ES_USER}} --pwd {{ES_PW}} \
|
||||
"/tmp/$RUNID.evtx" 1>/dev/null 2>/dev/null
|
||||
|
||||
docker run --rm \
|
||||
-v "$EVTX:/tmp/import.evtx" \
|
||||
-v "/nsm/import/evtx-end_newest:/tmp/newest" \
|
||||
-v "/nsm/import/evtx-start_oldest:/tmp/oldest" \
|
||||
--entrypoint '/evtx_calc_timestamps.sh' \
|
||||
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}
|
||||
}
|
||||
|
||||
# if no parameters supplied, display usage
|
||||
if [ $# -eq 0 ]; then
|
||||
usage
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# ensure this is a Manager node
|
||||
require_manager
|
||||
|
||||
# verify that all parameters are files
|
||||
for i in "$@"; do
|
||||
if ! [ -f "$i" ]; then
|
||||
usage
|
||||
echo "\"$i\" is not a valid file!"
|
||||
exit 2
|
||||
fi
|
||||
done
|
||||
|
||||
# track if we have any valid or invalid evtx
|
||||
INVALID_EVTXS="no"
|
||||
VALID_EVTXS="no"
|
||||
|
||||
# track oldest start and newest end so that we can generate the Kibana search hyperlink at the end
|
||||
START_OLDEST="2050-12-31"
|
||||
END_NEWEST="1971-01-01"
|
||||
|
||||
touch /nsm/import/evtx-start_oldest
|
||||
touch /nsm/import/evtx-end_newest
|
||||
|
||||
echo $START_OLDEST > /nsm/import/evtx-start_oldest
|
||||
echo $END_NEWEST > /nsm/import/evtx-end_newest
|
||||
|
||||
# paths must be quoted in case they include spaces
|
||||
for EVTX in "$@"; do
|
||||
EVTX=$(/usr/bin/realpath "$EVTX")
|
||||
echo "Processing Import: ${EVTX}"
|
||||
|
||||
# generate a unique hash to assist with dedupe checks
|
||||
HASH=$(md5sum "${EVTX}" | awk '{ print $1 }')
|
||||
HASH_DIR=/nsm/import/${HASH}
|
||||
echo "- assigning unique identifier to import: $HASH"
|
||||
|
||||
if [ -d $HASH_DIR ]; then
|
||||
echo "- this EVTX has already been imported; skipping"
|
||||
INVALID_EVTXS="yes"
|
||||
else
|
||||
VALID_EVTXS="yes"
|
||||
|
||||
EVTX_DIR=$HASH_DIR/evtx
|
||||
mkdir -p $EVTX_DIR
|
||||
|
||||
# import evtx and write them to import ingest pipeline
|
||||
echo "- importing logs to Elasticsearch..."
|
||||
evtx2es "${EVTX}" $HASH
|
||||
|
||||
# compare $START to $START_OLDEST
|
||||
START=$(cat /nsm/import/evtx-start_oldest)
|
||||
START_COMPARE=$(date -d $START +%s)
|
||||
START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s)
|
||||
if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then
|
||||
START_OLDEST=$START
|
||||
fi
|
||||
|
||||
# compare $ENDNEXT to $END_NEWEST
|
||||
END=$(cat /nsm/import/evtx-end_newest)
|
||||
ENDNEXT=`date +%Y-%m-%d --date="$END 1 day"`
|
||||
ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s)
|
||||
END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s)
|
||||
if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then
|
||||
END_NEWEST=$ENDNEXT
|
||||
fi
|
||||
|
||||
cp -f "${EVTX}" "${EVTX_DIR}"/data.evtx
|
||||
chmod 644 "${EVTX_DIR}"/data.evtx
|
||||
|
||||
fi # end of valid evtx
|
||||
|
||||
echo
|
||||
|
||||
done # end of for-loop processing evtx files
|
||||
|
||||
# remove temp files
|
||||
echo "Cleaning up:"
|
||||
for TEMP_EVTX in ${TEMP_EVTXS[@]}; do
|
||||
echo "- removing temporary evtx $TEMP_EVTX"
|
||||
rm -f $TEMP_EVTX
|
||||
done
|
||||
|
||||
# output final messages
|
||||
if [ "$INVALID_EVTXS" = "yes" ]; then
|
||||
echo
|
||||
echo "Please note! One or more evtx was invalid! You can scroll up to see which ones were invalid."
|
||||
fi
|
||||
|
||||
START_OLDEST_FORMATTED=`date +%Y-%m-%d --date="$START_OLDEST"`
|
||||
START_OLDEST_SLASH=$(echo $START_OLDEST_FORMATTED | sed -e 's/-/%2F/g')
|
||||
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')
|
||||
|
||||
if [ "$VALID_EVTXS" = "yes" ]; then
|
||||
cat << EOF
|
||||
|
||||
Import complete!
|
||||
|
||||
You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser:
|
||||
https://{{ URLBASE }}/#/hunt?q=import.id:${RUNID}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC
|
||||
|
||||
or you can manually set your Time Range to be (in UTC):
|
||||
From: $START_OLDEST_FORMATTED To: $END_NEWEST
|
||||
|
||||
Please note that it may take 30 seconds or more for events to appear in Hunt.
|
||||
EOF
|
||||
fi
|
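Example run on a manager node, matching the usage text above (paths are illustrative and should be quoted if they contain spaces):

    sudo so-import-evtx "/cases/host1/Security.evtx" "/cases/host1/System.evtx"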
||||
@@ -132,6 +132,8 @@ for PCAP in "$@"; do
|
||||
PCAP_FIXED=`mktemp /tmp/so-import-pcap-XXXXXXXXXX.pcap`
|
||||
echo "- attempting to recover corrupted PCAP file"
|
||||
pcapfix "${PCAP}" "${PCAP_FIXED}"
|
||||
# Make fixed file world readable since the Suricata docker container will run as a non-root user
|
||||
chmod a+r "${PCAP_FIXED}"
|
||||
PCAP="${PCAP_FIXED}"
|
||||
TEMP_PCAPS+=(${PCAP_FIXED})
|
||||
fi
|
||||
|
||||
@@ -15,4 +15,4 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
curl -X GET -k -L "https://localhost:9200/_cat/indices?v&s=index"
|
||||
{{ ELASTICCURL }} -X GET -k -L "https://localhost:9200/_cat/indices?v&s=index"
|
||||
|
||||
53
salt/common/tools/sbin/so-influxdb-clean
Executable file
@@ -0,0 +1,53 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
wdurregex="^[0-9]+w$"
|
||||
ddurregex="^[0-9]+d$"
|
||||
|
||||
echo -e "\nThis script is used to reduce the size of InfluxDB by removing old data and retaining only the duration specified."
|
||||
echo "The duration will need to be specified as an integer followed by the duration unit without a space."
|
||||
echo -e "\nFor example, to purge all data but retain the past 12 weeks, specify 12w for the duration."
|
||||
echo "The duration units are as follows:"
|
||||
echo " w - week(s)"
|
||||
echo " d - day(s)"
|
||||
|
||||
while true; do
|
||||
echo ""
|
||||
read -p 'Enter the duration of past data that you would like to retain: ' duration
|
||||
duration=$(echo $duration | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
if [[ "$duration" =~ $wdurregex ]] || [[ "$duration" =~ $ddurregex ]]; then
|
||||
break
|
||||
fi
|
||||
|
||||
echo -e "\nInvalid duration."
|
||||
done
|
||||
|
||||
echo -e "\nInfluxDB will now be cleaned and leave only the past $duration worth of data."
|
||||
read -r -p "Are you sure you want to continue? [y/N] " yorn
|
||||
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
|
||||
echo -e "\nCleaning InfluxDb and saving only the past $duration. This may could take several minutes depending on how much data needs to be cleaned."
|
||||
if docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"DELETE FROM /.*/ WHERE \"time\" >= '2020-01-01T00:00:00.0000000Z' AND \"time\" <= now() - $duration\""; then
|
||||
echo -e "\nInfluxDb clean complete."
|
||||
else
|
||||
echo -e "\nSomething went wrong with cleaning InfluxDB. Please verify that the so-influxdb Docker container is running, and check the log at /opt/so/log/influxdb/influxdb.log for any details."
|
||||
fi
|
||||
else
|
||||
echo -e "\nExiting as requested."
|
||||
fi
|
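The script is interactive; a typical session might look like the following (the retention value is just an example):

    sudo so-influxdb-clean
    Enter the duration of past data that you would like to retain: 12w
    Are you sure you want to continue? [y/N] y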
||||
63
salt/common/tools/sbin/so-influxdb-downsample
Executable file
@@ -0,0 +1,63 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{%- set role = grains.id.split('_') | last %}
|
||||
{%- if role in ['manager', 'managersearch', 'eval', 'standalone'] %}
|
||||
{%- import_yaml 'influxdb/defaults.yaml' as default_settings %}
|
||||
{%- set influxdb = salt['grains.filter_by'](default_settings, default='influxdb', merge=salt['pillar.get']('influxdb', {})) %}
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
echo -e "\nThis script is used to reduce the size of InfluxDB by downsampling old data into the so_long_term retention policy."
|
||||
|
||||
echo -e "\nInfluxDB will now be downsampled. This could take a few hours depending on how large the database is and hardware resources available."
|
||||
read -r -p "Are you sure you want to continue? [y/N] " yorn
|
||||
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
|
||||
echo -e "\nDownsampling InfluxDb started at `date`. This may take several hours depending on how much data needs to be downsampled."
|
||||
|
||||
{% for dest_rp in influxdb.downsample.keys() -%}
|
||||
{% for measurement in influxdb.downsample[dest_rp].get('measurements', []) -%}
|
||||
|
||||
day=0
|
||||
startdate=`date`
|
||||
while docker exec -t so-influxdb /bin/bash -c "influx -ssl -unsafeSsl -database telegraf -execute \"SELECT mean(*) INTO \"so_long_term\".\"{{measurement}}\" FROM \"autogen\".\"{{measurement}}\" WHERE \"time\" >= '2020-07-21T00:00:00.0000000Z' + ${day}d AND \"time\" <= '2020-07-21T00:00:00.0000000Z' + $((day+1))d GROUP BY time(5m),*\""; do
|
||||
# why 2020-07-21?
|
||||
migrationdate=`date -d "2020-07-21 + ${day} days" +"%y-%m-%d"`
|
||||
|
||||
echo "Downsampling of measurement: {{measurement}} from $migrationdate started at $startdate and completed at `date`."
|
||||
|
||||
newdaytomigrate=$(date -d "$migrationdate + 1 days" +"%s")
|
||||
today=$(date +"%s")
|
||||
if [ $newdaytomigrate -ge $today ]; then
|
||||
break
|
||||
else
|
||||
((day=day+1))
|
||||
startdate=`date`
|
||||
echo -e "\nDownsampling the next day's worth of data for measurement: {{measurement}}."
|
||||
fi
|
||||
done
|
||||
|
||||
{% endfor -%}
|
||||
{% endfor -%}
|
||||
|
||||
echo -e "\nInfluxDb data downsampling complete."
|
||||
|
||||
else
|
||||
echo -e "\nExiting as requested."
|
||||
fi
|
||||
{%- else %}
|
||||
echo -e "\nThis script can only be run on a node running InfluxDB."
|
||||
{%- endif %}
|
||||
34
salt/common/tools/sbin/so-influxdb-drop-autogen
Executable file
@@ -0,0 +1,34 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
echo -e "\nThis script is used to reduce the size of InfluxDB by dropping the autogen retention policy."
|
||||
echo "If you want to retain historical data prior to 2.3.60, then this should only be run after you have downsampled your data using so-influxdb-downsample."
|
||||
|
||||
echo -e "\nThe autogen retention policy will now be dropped from InfluxDB."
|
||||
read -r -p "Are you sure you want to continue? [y/N] " yorn
|
||||
if [[ "$yorn" =~ ^([yY][eE][sS]|[yY])$ ]]; then
|
||||
echo -e "\nDropping autogen retention policy."
|
||||
if docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -execute "drop retention policy autogen on telegraf"; then
|
||||
echo -e "\nAutogen retention policy dropped from InfluxDb."
|
||||
else
|
||||
echo -e "\nSomething went wrong dropping then autogen retention policy from InfluxDB. Please verify that the so-influxdb Docker container is running, and check the log at /opt/so/log/influxdb/influxdb.log for any details."
|
||||
fi
|
||||
else
|
||||
echo -e "\nExiting as requested."
|
||||
fi
|
||||
@@ -23,7 +23,9 @@
|
||||
KIBANA_HOST={{ MANAGER }}
|
||||
KSO_PORT=5601
|
||||
OUTFILE="saved_objects.ndjson"
|
||||
curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -L $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
|
||||
|
||||
SESSIONCOOKIE=$({{ ELASTICCURL }} -c - -X GET http://$KIBANA_HOST:$KSO_PORT/ | grep sid | awk '{print $7}')
|
||||
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -L $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
|
||||
|
||||
# Clean up using PLACEHOLDER
|
||||
sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
. /usr/sbin/so-common
|
||||
|
||||
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic"
|
||||
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "{{ ELASTICCURL }}"
|
||||
## This hackery will be removed if using Elastic Auth ##
|
||||
|
||||
# Let's snag a cookie from Kibana
|
||||
THECOOKIE=$(curl -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
|
||||
SESSIONCOOKIE=$({{ ELASTICCURL }} -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
|
||||
|
||||
# Disable certain Features from showing up in the Kibana UI
|
||||
echo
|
||||
echo "Setting up default Space:"
|
||||
curl -b "sid=$THECOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
|
||||
echo
|
||||
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
|
||||
echo
|
||||
|
||||
303
salt/common/tools/sbin/so-learn
Executable file
@@ -0,0 +1,303 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
from itertools import chain
|
||||
from typing import List
|
||||
|
||||
import signal
|
||||
import sys
|
||||
import os
|
||||
import re
|
||||
import subprocess
|
||||
import argparse
|
||||
import textwrap
|
||||
import yaml
|
||||
import multiprocessing
|
||||
import docker
|
||||
import pty
|
||||
|
||||
minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
|
||||
so_status_conf = '/opt/so/conf/so-status/so-status.conf'
|
||||
proc: subprocess.CompletedProcess = None
|
||||
|
||||
# Temp store of modules, will likely be broken out into salt
|
||||
def get_learn_modules():
|
||||
return {
|
||||
'logscan': { 'cpu_period': get_cpu_period(fraction=0.25), 'enabled': False, 'description': 'Scan log files against pre-trained models to alert on anomalies.' }
|
||||
}
|
||||
|
||||
|
||||
def get_cpu_period(fraction: float):
|
||||
multiplier = 10000
|
||||
|
||||
num_cores = multiprocessing.cpu_count()
|
||||
if num_cores <= 2:
|
||||
fraction = 1.
|
||||
|
||||
num_used_cores = int(num_cores * fraction)
|
||||
cpu_period = num_used_cores * multiplier
|
||||
return cpu_period
|
||||
|
||||
|
||||
def sigint_handler(*_):
|
||||
print('Exiting gracefully on Ctrl-C')
|
||||
if proc is not None: proc.send_signal(signal.SIGINT)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def find_minion_pillar() -> str:
|
||||
regex = '^.*_(manager|managersearch|standalone|import|eval)\.sls$'
|
||||
|
||||
result = []
|
||||
for root, _, files in os.walk(minion_pillar_dir):
|
||||
for f_minion_id in files:
|
||||
if re.search(regex, f_minion_id):
|
||||
result.append(os.path.join(root, f_minion_id))
|
||||
|
||||
if len(result) == 0:
|
||||
print('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?', file=sys.stderr)
|
||||
sys.exit(3)
|
||||
elif len(result) > 1:
|
||||
res_str = ', '.join(f'"{r}"' for r in result)
|
||||
print('(This should not happen, the system is in an error state if you see this message.)\n', file=sys.stderr)
|
||||
print('More than one manager-type pillar exists, minion id\'s listed below:', file=sys.stderr)
|
||||
print(f' {res_str}', file=sys.stderr)
|
||||
sys.exit(3)
|
||||
else:
|
||||
return result[0]
|
||||
|
||||
|
||||
def read_pillar(pillar: str):
|
||||
try:
|
||||
with open(pillar, 'r') as pillar_file:
|
||||
loaded_yaml = yaml.safe_load(pillar_file.read())
|
||||
if loaded_yaml is None:
|
||||
print(f'Could not parse {pillar}', file=sys.stderr)
|
||||
sys.exit(3)
|
||||
return loaded_yaml
|
||||
except:
|
||||
print(f'Could not open {pillar}', file=sys.stderr)
|
||||
sys.exit(3)
|
||||
|
||||
|
||||
def write_pillar(pillar: str, content: dict):
|
||||
try:
|
||||
with open(pillar, 'w') as pillar_file:
|
||||
yaml.dump(content, pillar_file, default_flow_style=False)
|
||||
except:
|
||||
print(f'Could not open {pillar}', file=sys.stderr)
|
||||
sys.exit(3)
|
||||
|
||||
|
||||
def mod_so_status(action: str, item: str):
|
||||
with open(so_status_conf, 'a+') as conf:
|
||||
conf.seek(0)
|
||||
containers = conf.readlines()
|
||||
|
||||
if f'so-{item}\n' in containers:
|
||||
if action == 'remove': containers.remove(f'so-{item}\n')
|
||||
if action == 'add': pass
|
||||
else:
|
||||
if action == 'remove': pass
|
||||
if action == 'add': containers.append(f'so-{item}\n')
|
||||
|
||||
[containers.remove(c_name) for c_name in containers if c_name == '\n'] # remove extra newlines
|
||||
|
||||
conf.seek(0)
|
||||
conf.truncate(0)
|
||||
conf.writelines(containers)
|
||||
|
||||
|
||||
def create_pillar_if_not_exist(pillar:str, content: dict):
|
||||
pillar_dict = content
|
||||
|
||||
if pillar_dict.get('learn', {}).get('modules') is None:
|
||||
pillar_dict['learn'] = {}
|
||||
pillar_dict['learn']['modules'] = get_learn_modules()
|
||||
content.update()
|
||||
write_pillar(pillar, content)
|
||||
|
||||
return content
|
||||
|
||||
|
||||
def salt_call(module: str):
|
||||
salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', f'learn.{module}', 'queue=True']
|
||||
|
||||
print(f' Applying salt state for {module} module...')
|
||||
proc = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
|
||||
return_code = proc.returncode
|
||||
if return_code != 0:
|
||||
print(f' [ERROR] Failed to apply salt state for {module} module.')
|
||||
|
||||
return return_code
|
||||
|
||||
|
||||
def pull_image(module: str):
|
||||
container_basename = f'so-{module}'
|
||||
|
||||
client = docker.from_env()
|
||||
image_list = client.images.list(filters={ 'dangling': False })
|
||||
tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
|
||||
basename_match = list(filter(lambda x: f'{container_basename}' in x, tag_list))
|
||||
local_registry_match = list(filter(lambda x: ':5000' in x, basename_match))
|
||||
|
||||
if len(local_registry_match) == 0:
|
||||
print(f'Pulling and verifying missing image for {module} (may take several minutes) ...')
|
||||
pull_command = ['so-image-pull', '--quiet', container_basename]
|
||||
|
||||
proc = subprocess.run(pull_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
|
||||
return_code = proc.returncode
|
||||
if return_code != 0:
|
||||
print(f'[ERROR] Failed to pull image so-{module}, skipping state.')
|
||||
else:
|
||||
return_code = 0
|
||||
return return_code
|
||||
|
||||
|
||||
def apply(module_list: List):
|
||||
return_code = 0
|
||||
for module in module_list:
|
||||
salt_ret = salt_call(module)
|
||||
# Only update return_code if the command returned a non-zero return
|
||||
if salt_ret != 0:
|
||||
return_code = salt_ret
|
||||
|
||||
return return_code
|
||||
|
||||
|
||||
def check_apply(args: dict):
|
||||
if args.apply:
|
||||
print('Configuration updated. Applying changes:')
|
||||
return apply(args.modules)
|
||||
else:
|
||||
message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
|
||||
answer = input(message)
|
||||
while answer.lower() not in [ 'y', 'n', '' ]:
|
||||
answer = input(message)
|
||||
if answer.lower() in [ 'n', '' ]:
|
||||
return 0
|
||||
else:
|
||||
print('Applying changes:')
|
||||
return apply(args.modules)
|
||||
|
||||
|
||||
def enable_disable_modules(args, enable: bool):
|
||||
pillar_modules = args.pillar_dict.get('learn', {}).get('modules')
|
||||
pillar_mod_names = args.pillar_dict.get('learn', {}).get('modules').keys()
|
||||
|
||||
action_str = 'add' if enable else 'remove'
|
||||
|
||||
if 'all' in args.modules:
|
||||
for module, details in pillar_modules.items():
|
||||
details['enabled'] = enable
|
||||
mod_so_status(action_str, module)
|
||||
if enable: pull_image(module)
|
||||
args.pillar_dict.update()
|
||||
write_pillar(args.pillar, args.pillar_dict)
|
||||
else:
|
||||
write_needed = False
|
||||
for module in args.modules:
|
||||
if module in pillar_mod_names:
|
||||
if pillar_modules[module]['enabled'] == enable:
|
||||
state_str = 'enabled' if enable else 'disabled'
|
||||
print(f'{module} module already {state_str}.', file=sys.stderr)
|
||||
else:
|
||||
if enable and pull_image(module) != 0:
|
||||
continue
|
||||
pillar_modules[module]['enabled'] = enable
|
||||
mod_so_status(action_str, module)
|
||||
write_needed = True
|
||||
if write_needed:
|
||||
args.pillar_dict.update()
|
||||
write_pillar(args.pillar, args.pillar_dict)
|
||||
|
||||
cmd_ret = check_apply(args)
|
||||
return cmd_ret
|
||||
|
||||
|
||||
def enable_modules(args):
|
||||
enable_disable_modules(args, enable=True)
|
||||
|
||||
|
||||
def disable_modules(args):
|
||||
enable_disable_modules(args, enable=False)
|
||||
|
||||
|
||||
def list_modules(*_):
|
||||
print('Available ML modules:')
|
||||
for module, details in get_learn_modules().items():
|
||||
print(f' - { module } : {details["description"]}')
|
||||
return 0
|
||||
|
||||
|
||||
def main():
|
||||
beta_str = 'BETA - SUBJECT TO CHANGE\n'
|
||||
|
||||
apply_help='After ACTION the chosen modules, apply any necessary salt states.'
|
||||
enable_apply_help = apply_help.replace('ACTION', 'enabling')
|
||||
disable_apply_help = apply_help.replace('ACTION', 'disabling')
|
||||
|
||||
signal.signal(signal.SIGINT, sigint_handler)
|
||||
|
||||
if os.geteuid() != 0:
|
||||
print('You must run this script as root', file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
|
||||
|
||||
subcommand_desc = textwrap.dedent(
|
||||
"""\
|
||||
enable Enable one or more ML modules.
|
||||
disable Disable one or more ML modules.
|
||||
list List all available ML modules.
|
||||
"""
|
||||
)
|
||||
|
||||
subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')
|
||||
|
||||
module_help_str = 'One or more ML modules, which can be listed using \'so-learn list\'. Use the keyword \'all\' to apply the action to all available modules.'
|
||||
|
||||
enable = subparsers.add_parser('enable')
|
||||
enable.set_defaults(func=enable_modules)
|
||||
enable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
|
||||
enable.add_argument('--apply', action='store_const', const=True, required=False, help=enable_apply_help)
|
||||
|
||||
disable = subparsers.add_parser('disable')
|
||||
disable.set_defaults(func=disable_modules)
|
||||
disable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
|
||||
disable.add_argument('--apply', action='store_const', const=True, required=False, help=disable_apply_help)
|
||||
|
||||
list = subparsers.add_parser('list')
|
||||
list.set_defaults(func=list_modules)
|
||||
|
||||
args = main_parser.parse_args(sys.argv[1:])
|
||||
args.pillar = find_minion_pillar()
|
||||
args.pillar_dict = create_pillar_if_not_exist(args.pillar, read_pillar(args.pillar))
|
||||
|
||||
if hasattr(args, 'func'):
|
||||
exit_code = args.func(args)
|
||||
else:
|
||||
if args.command is None:
|
||||
print(beta_str)
|
||||
main_parser.print_help()
|
||||
sys.exit(0)
|
||||
|
||||
sys.exit(exit_code)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
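A usage sketch for the new so-learn CLI, using the one module defined above (logscan):

    sudo so-learn list
    sudo so-learn enable logscan --apply
    sudo so-learn disable logscan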
26
salt/common/tools/sbin/so-pcap-export
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: $0 <steno-query> Output-Filename"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
docker exec -it so-sensoroni scripts/stenoquery.sh "$1" -w /nsm/pcapout/$2.pcap
|
||||
|
||||
echo ""
|
||||
echo "If successful, the output was written to: /nsm/pcapout/$2.pcap"
|
||||
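A minimal usage sketch for the export script above (the Stenographer query is only an illustration; any filter stenoquery accepts will work, and the output name is arbitrary):

    sudo so-pcap-export 'host 192.0.2.10 and after 2021-06-01T00:00:00Z' case1234
    # resulting file: /nsm/pcapout/case1234.pcap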
22
salt/common/tools/sbin/so-playbook-import
Normal file
@@ -0,0 +1,22 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
ENABLEPLAY=${1:-False}
|
||||
|
||||
docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))"
|
||||
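Illustrative invocation of the importer above; the single argument defaults to False and is passed to playbook.play_import(), which presumably controls whether imported Plays are activated:

    sudo so-playbook-import True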
@@ -22,5 +22,5 @@ salt-call state.apply playbook.db_init,playbook,playbook.automation_user_create
|
||||
/usr/sbin/so-soctopus-restart
|
||||
|
||||
echo "Importing Plays - this will take some time...."
|
||||
wait 5
|
||||
/usr/sbin/so-playbook-ruleupdate
|
||||
sleep 5
|
||||
/usr/sbin/so-playbook-ruleupdate
|
||||
|
||||
@@ -17,67 +17,101 @@
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
#check_boss_raid() {
|
||||
# BOSSBIN=/opt/boss/mvcli
|
||||
# BOSSRC=$($BOSSBIN info -o vd | grep functional)
|
||||
#
|
||||
# if [[ $BOSSRC ]]; then
|
||||
# # Raid is good
|
||||
# BOSSRAID=0
|
||||
# else
|
||||
# BOSSRAID=1
|
||||
# fi
|
||||
#}
|
||||
appliance_check() {
|
||||
{%- if salt['grains.get']('sosmodel', '') %}
|
||||
APPLIANCE=1
|
||||
{%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %}
|
||||
exit 0
|
||||
{%- endif %}
|
||||
DUDEYOUGOTADELL=$(dmidecode |grep Dell)
|
||||
if [[ -n $DUDEYOUGOTADELL ]]; then
|
||||
APPTYPE=dell
|
||||
else
|
||||
APPTYPE=sm
|
||||
fi
|
||||
mkdir -p /opt/so/log/raid
|
||||
|
||||
check_lsi_raid() {
|
||||
# For use for LSI on Ubuntu
|
||||
#MEGA=/opt/MegaRAID/MegeCli/MegaCli64
|
||||
#LSIRC=$($MEGA -LDInfo -Lall -aALL | grep Optimal)
|
||||
# Open Source Centos
|
||||
MEGA=/opt/mega/megasasctl
|
||||
LSIRC=$($MEGA | grep optimal)
|
||||
{%- else %}
|
||||
echo "This is not an appliance"
|
||||
exit 0
|
||||
{%- endif %}
|
||||
}
|
||||
|
||||
if [[ $LSIRC ]]; then
|
||||
# Raid is good
|
||||
LSIRAID=0
|
||||
check_nsm_raid() {
|
||||
PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
|
||||
MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)
|
||||
|
||||
if [[ $APPLIANCE == '1' ]]; then
|
||||
if [[ -n $PERCCLI ]]; then
|
||||
HWRAID=0
|
||||
elif [[ -n $MEGACTL ]]; then
|
||||
HWRAID=0
|
||||
else
|
||||
LSIRAID=1
|
||||
HWRAID=1
|
||||
fi
|
||||
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
check_boss_raid() {
|
||||
MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)
|
||||
|
||||
if [[ -n $DUDEYOUGOTADELL ]]; then
|
||||
if [[ -n $MVCLI ]]; then
|
||||
BOSSRAID=0
|
||||
else
|
||||
BOSSRAID=1
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
check_software_raid() {
|
||||
if [[ -n $DUDEYOUGOTADELL ]]; then
|
||||
SWRC=$(grep "_" /proc/mdstat)
|
||||
|
||||
if [[ $SWRC ]]; then
|
||||
if [[ -n $SWRC ]]; then
|
||||
# RAID is failed in some way
|
||||
SWRAID=1
|
||||
else
|
||||
SWRAID=0
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# This script checks raid status if you use SO appliances
|
||||
|
||||
# See if this is an appliance
|
||||
|
||||
appliance_check
|
||||
check_nsm_raid
|
||||
check_boss_raid
|
||||
{%- if salt['grains.get']('sosmodel', '') %}
|
||||
mkdir -p /opt/so/log/raid
|
||||
{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
|
||||
#check_boss_raid
|
||||
{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
|
||||
check_software_raid
|
||||
#echo "osraid=$BOSSRAID nsmraid=$SWRAID" > /opt/so/log/raid/status.log
|
||||
echo "osraid=1 nsmraid=$SWRAID" > /opt/so/log/raid/status.log
|
||||
{%- elif grains['sosmodel'] in ['SOS1000F', 'SOS1000', 'SOSSN7200', 'SOS10K', 'SOS4000'] %}
|
||||
#check_boss_raid
|
||||
check_lsi_raid
|
||||
#echo "osraid=$BOSSRAID nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
|
||||
echo "osraid=1 nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
|
||||
{%- else %}
|
||||
exit 0
|
||||
{%- endif %}
|
||||
{%- else %}
|
||||
exit 0
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
|
||||
if [[ -n $SWRAID ]]; then
|
||||
if [[ $SWRAID == '0' && $BOSSRAID == '0' ]]; then
|
||||
RAIDSTATUS=0
|
||||
else
|
||||
RAIDSTATUS=1
|
||||
fi
|
||||
elif [[ -n $DUDEYOUGOTADELL ]]; then
|
||||
if [[ $BOSSRAID == '0' && $HWRAID == '0' ]]; then
|
||||
RAIDSTATUS=0
|
||||
else
|
||||
RAIDSTATUS=1
|
||||
fi
|
||||
elif [[ "$APPTYPE" == 'sm' ]]; then
|
||||
if [[ -n "$HWRAID" ]]; then
|
||||
RAIDSTATUS=0
|
||||
else
|
||||
RAIDSTATUS=1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "nsmraid=$RAIDSTATUS" > /opt/so/log/raid/status.log
|
||||
|
||||
|
||||
|
||||
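The status file written above is a single key=value line, so reading it from the shell is enough to check the result (path taken from the script; the value shown is only an example):

    cat /opt/so/log/raid/status.log
    # nsmraid=0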
@@ -1,13 +1,10 @@
|
||||
#!/bin/bash
|
||||
got_root() {
|
||||
|
||||
# Make sure you are root
|
||||
if [ "$(id -u)" -ne 0 ]; then
|
||||
echo "This script must be run using sudo!"
|
||||
exit 1
|
||||
fi
|
||||
. /usr/sbin/so-common
|
||||
|
||||
}
|
||||
argstr=""
|
||||
for arg in "$@"; do
|
||||
argstr="${argstr} \"${arg}\""
|
||||
done
|
||||
|
||||
got_root
|
||||
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat $1"
|
||||
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"
|
||||
|
||||
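With the change above, any extra arguments to so-rule-update are quoted and passed straight through to idstools-rulecat inside the so-idstools container, in addition to the always-present --force. A hypothetical invocation (the extra flag is just an example of a rulecat option, not something so-rule-update requires):

    sudo so-rule-update --suricata-version 5.0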
@@ -23,6 +23,11 @@ TESTPCAP=$2
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [ $# -lt 2 ]; then
|
||||
echo "Usage: $0 <CustomRule> <TargetPCAP>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "==============="
|
||||
echo "Running all.rules and $TESTRULE against the following pcap: $TESTPCAP"
|
||||
|
||||
@@ -31,7 +31,7 @@ if [[ $# -lt 1 ]]; then
|
||||
echo "Usage: $0 <pcap-sample(s)>"
|
||||
echo
|
||||
echo "All PCAPs must be placed in the /opt/so/samples directory unless replaying"
|
||||
echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP sampes"
|
||||
echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP samples"
|
||||
echo "are located in the /opt/samples directory inside of the image."
|
||||
echo
|
||||
echo "Customer provided PCAP example:"
|
||||
|
||||
@@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then
|
||||
fi
|
||||
read -rs THEHIVE_PASS
|
||||
|
||||
if ! check_password "$THEHIVE_PASS"; then
|
||||
echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
|
||||
exit 2
|
||||
fi
|
||||
check_password_and_exit "$THEHIVE_PASS"
|
||||
|
||||
# Create new user in TheHive
|
||||
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
|
||||
|
||||
57
salt/common/tools/sbin/so-thehive-user-update
Executable file
@@ -0,0 +1,57 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
usage() {
|
||||
echo "Usage: $0 <user-name>"
|
||||
echo ""
|
||||
echo "Update password for an existing TheHive user. The new password will be read from STDIN."
|
||||
exit 1
|
||||
}
|
||||
|
||||
if [ $# -ne 1 ]; then
|
||||
usage
|
||||
fi
|
||||
|
||||
USER=$1
|
||||
|
||||
THEHIVE_KEY=$(lookup_pillar hivekey)
|
||||
THEHIVE_API_URL="$(lookup_pillar url_base)/thehive/api"
|
||||
THEHIVE_USER=$USER
|
||||
|
||||
# Read password for new user from stdin
|
||||
test -t 0
|
||||
if [[ $? == 0 ]]; then
|
||||
echo "Enter new password:"
|
||||
fi
|
||||
read -rs THEHIVE_PASS
|
||||
|
||||
if ! check_password "$THEHIVE_PASS"; then
|
||||
echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
|
||||
exit 2
|
||||
fi
|
||||
|
||||
# Change password for user in TheHive
|
||||
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHIVE_API_URL/user/${THEHIVE_USER}/password/set" -d "{\"password\" : \"$THEHIVE_PASS\"}")
|
||||
if [[ -z "$resp" ]]; then
|
||||
echo "Successfully updated TheHive user password"
|
||||
else
|
||||
echo "Unable to update TheHive user password"
|
||||
echo $resp
|
||||
exit 2
|
||||
fi
|
||||
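A usage sketch for the script above, piping the new password over STDIN (user name and password are placeholders):

    echo 'MyN3wPassword' | sudo so-thehive-user-update jdoe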
@@ -18,11 +18,17 @@
|
||||
|
||||
source $(dirname $0)/so-common
|
||||
|
||||
if [[ $# -lt 1 || $# -gt 2 ]]; then
|
||||
echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
|
||||
DEFAULT_ROLE=analyst
|
||||
|
||||
if [[ $# -lt 1 || $# -gt 3 ]]; then
|
||||
echo "Usage: $0 <operation> [email] [role]"
|
||||
echo ""
|
||||
echo " where <operation> is one of the following:"
|
||||
echo ""
|
||||
echo " list: Lists all user email addresses currently defined in the identity system"
|
||||
echo " add: Adds a new user to the identity system; requires 'email' parameter"
|
||||
echo " add: Adds a new user to the identity system; requires 'email' parameter, while 'role' parameter is optional and defaults to $DEFAULT_ROLE"
|
||||
echo " addrole: Grants a role to an existing user; requires 'email' and 'role' parameters"
|
||||
echo " delrole: Removes a role from an existing user; requires 'email' and 'role' parameters"
|
||||
echo " update: Updates a user's password; requires 'email' parameter"
|
||||
echo " enable: Enables a user; requires 'email' parameter"
|
||||
echo " disable: Disables a user; requires 'email' parameter"
|
||||
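Illustrative invocations of the new role handling shown above (email and role values are placeholders; 'analyst' is the default role defined earlier in the script, and 'superuser' is the role the migration grants to existing users):

    so-user add jane@example.com              # created with the default analyst role
    so-user add jane@example.com superuser    # created with an explicit role
    so-user addrole jane@example.com superuser
    so-user delrole jane@example.com superuser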
@@ -36,13 +42,25 @@ fi
|
||||
|
||||
operation=$1
|
||||
email=$2
|
||||
role=$3
|
||||
|
||||
kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434}
|
||||
databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite}
|
||||
argon2Iterations=${ARGON2_ITERATIONS:-3}
|
||||
argon2Memory=${ARGON2_MEMORY:-14}
|
||||
argon2Parallelism=${ARGON2_PARALLELISM:-2}
|
||||
argon2HashSize=${ARGON2_HASH_SIZE:-32}
|
||||
bcryptRounds=${BCRYPT_ROUNDS:-12}
|
||||
elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
|
||||
elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles}
|
||||
socRolesFile=${SOC_ROLES_FILE:-/opt/so/conf/soc/soc_users_roles}
|
||||
esUID=${ELASTIC_UID:-930}
|
||||
esGID=${ELASTIC_GID:-930}
|
||||
soUID=${SOCORE_UID:-939}
|
||||
soGID=${SOCORE_GID:-939}
|
||||
|
||||
function lock() {
|
||||
# Obtain file descriptor lock
|
||||
exec 99>/var/tmp/so-user.lock || fail "Unable to create lock descriptor; if the system was not shutdown gracefully you may need to remove /var/tmp/so-user.lock manually."
|
||||
flock -w 10 99 || fail "Another process is using so-user; if the system was not shutdown gracefully you may need to remove /var/tmp/so-user.lock manually."
|
||||
trap 'rm -f /var/tmp/so-user.lock' EXIT
|
||||
}
|
||||
|
||||
function fail() {
|
||||
msg=$1
|
||||
@@ -58,7 +76,7 @@ function require() {
|
||||
|
||||
# Verify this environment is capable of running this script
|
||||
function verifyEnvironment() {
|
||||
require "argon2"
|
||||
require "htpasswd"
|
||||
require "jq"
|
||||
require "curl"
|
||||
require "openssl"
|
||||
@@ -72,7 +90,7 @@ function findIdByEmail() {
|
||||
email=$1
|
||||
|
||||
response=$(curl -Ss -L ${kratosUrl}/identities)
|
||||
identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
|
||||
identityId=$(echo "${response}" | jq -r ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
|
||||
echo $identityId
|
||||
}
|
||||
|
||||
@@ -81,20 +99,33 @@ function validatePassword() {
|
||||
|
||||
len=$(expr length "$password")
|
||||
if [[ $len -lt 6 ]]; then
|
||||
echo "Password does not meet the minimum requirements"
|
||||
exit 2
|
||||
fail "Password does not meet the minimum requirements"
|
||||
fi
|
||||
check_password_and_exit "$password"
|
||||
}
|
||||
|
||||
function validateEmail() {
|
||||
email=$1
|
||||
# (?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])
|
||||
if [[ ! "$email" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ ]]; then
|
||||
echo "Email address is invalid"
|
||||
exit 3
|
||||
fail "Email address is invalid"
|
||||
fi
|
||||
|
||||
if [[ "$email" =~ [A-Z] ]]; then
|
||||
fail "Email addresses cannot contain uppercase letters"
|
||||
fi
|
||||
}
|
||||
|
||||
function hashPassword() {
|
||||
password=$1
|
||||
|
||||
passwordHash=$(echo "${password}" | htpasswd -niBC $bcryptRounds SOUSER)
|
||||
passwordHash=$(echo "$passwordHash" | cut -c 11-)
|
||||
passwordHash="\$2a${passwordHash}" # still waiting for https://github.com/elastic/elasticsearch/issues/51132
|
||||
echo "$passwordHash"
|
||||
}
|
||||
|
||||
|
||||
function updatePassword() {
|
||||
identityId=$1
|
||||
|
||||
@@ -109,26 +140,221 @@ function updatePassword() {
|
||||
validatePassword "$password"
|
||||
fi
|
||||
|
||||
if [[ -n $identityId ]]; then
|
||||
if [[ -n "$identityId" ]]; then
|
||||
# Generate password hash
|
||||
salt=$(openssl rand -hex 8)
|
||||
passwordHash=$(echo "${password}" | argon2 ${salt} -id -t $argon2Iterations -m $argon2Memory -p $argon2Parallelism -l $argon2HashSize -e)
|
||||
|
||||
passwordHash=$(hashPassword "$password")
|
||||
# Update DB with new hash
|
||||
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"${passwordHash}\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
|
||||
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to update password"
|
||||
fi
|
||||
}
|
||||
|
||||
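A hedged way to confirm the new hash landed in the Kratos database after updatePassword() runs (the database path is the script's default; the column preview length is arbitrary):

    echo "select identity_id, substr(config, 1, 40) from identity_credentials limit 5;" \
        | sqlite3 /opt/so/conf/kratos/db/db.sqlite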
function createFile() {
|
||||
filename=$1
|
||||
uid=$2
|
||||
gid=$3
|
||||
|
||||
mkdir -p $(dirname "$filename")
|
||||
truncate -s 0 "$filename"
|
||||
chmod 600 "$filename"
|
||||
chown "${uid}:${gid}" "$filename"
|
||||
}
|
||||
|
||||
function ensureRoleFileExists() {
|
||||
if [[ ! -f "$socRolesFile" || ! -s "$socRolesFile" ]]; then
|
||||
# Generate the new users file
|
||||
rolesTmpFile="${socRolesFile}.tmp"
|
||||
createFile "$rolesTmpFile" "$soUID" "$soGID"
|
||||
|
||||
if [[ -f "$databasePath" ]]; then
|
||||
echo "Migrating roles to new file: $socRolesFile"
|
||||
|
||||
echo "select 'superuser:' || id from identities;" | sqlite3 "$databasePath" \
|
||||
>> "$rolesTmpFile"
|
||||
[[ $? != 0 ]] && fail "Unable to read identities from database"
|
||||
|
||||
echo "The following users have all been migrated with the super user role:"
|
||||
cat "${rolesTmpFile}"
|
||||
else
|
||||
echo "Database file does not exist yet, installation is likely not yet complete."
|
||||
fi
|
||||
|
||||
mv "${rolesTmpFile}" "${socRolesFile}"
|
||||
fi
|
||||
}
|
||||
|
||||
function syncElasticSystemUser() {
|
||||
json=$1
|
||||
userid=$2
|
||||
usersFile=$3
|
||||
|
||||
user=$(echo "$json" | jq -r ".local.users.$userid.user")
|
||||
pass=$(echo "$json" | jq -r ".local.users.$userid.pass")
|
||||
|
||||
[[ -z "$user" || -z "$pass" ]] && fail "Elastic auth credentials for system user '$userid' are missing"
|
||||
hash=$(hashPassword "$pass")
|
||||
|
||||
echo "${user}:${hash}" >> "$usersFile"
|
||||
}
|
||||
|
||||
function syncElasticSystemRole() {
|
||||
json=$1
|
||||
userid=$2
|
||||
role=$3
|
||||
rolesFile=$4
|
||||
|
||||
user=$(echo "$json" | jq -r ".local.users.$userid.user")
|
||||
|
||||
[[ -z "$user" ]] && fail "Elastic auth credentials for system user '$userid' are missing"
|
||||
|
||||
echo "${role}:${user}" >> "$rolesFile"
|
||||
}
|
||||
|
||||
function syncElastic() {
|
||||
echo "Syncing users and roles between SOC and Elastic..."
|
||||
|
||||
usersTmpFile="${elasticUsersFile}.tmp"
|
||||
createFile "${usersTmpFile}" "$esUID" "$esGID"
|
||||
rolesTmpFile="${elasticRolesFile}.tmp"
|
||||
createFile "${rolesTmpFile}" "$esUID" "$esGID"
|
||||
|
||||
authPillarJson=$(lookup_salt_value "auth" "elasticsearch" "pillar" "json")
|
||||
|
||||
syncElasticSystemUser "$authPillarJson" "so_elastic_user" "$usersTmpFile"
|
||||
syncElasticSystemUser "$authPillarJson" "so_kibana_user" "$usersTmpFile"
|
||||
syncElasticSystemUser "$authPillarJson" "so_logstash_user" "$usersTmpFile"
|
||||
syncElasticSystemUser "$authPillarJson" "so_beats_user" "$usersTmpFile"
|
||||
syncElasticSystemUser "$authPillarJson" "so_monitor_user" "$usersTmpFile"
|
||||
|
||||
syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"
|
||||
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"
|
||||
syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"
|
||||
syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"
|
||||
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_collector" "$rolesTmpFile"
|
||||
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_agent" "$rolesTmpFile"
|
||||
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "monitoring_user" "$rolesTmpFile"
|
||||
|
||||
if [[ -f "$databasePath" && -f "$socRolesFile" ]]; then
|
||||
# Append the SOC users
|
||||
echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \
|
||||
"from identity_credential_identifiers ici, identity_credentials ic " \
|
||||
"where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \
|
||||
"order by ici.identifier;" | \
|
||||
sqlite3 "$databasePath" | \
|
||||
jq -r '.user + ":" + .data.hashed_password' \
|
||||
>> "$usersTmpFile"
|
||||
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"
|
||||
|
||||
# Append the user roles
|
||||
while IFS="" read -r rolePair || [ -n "$rolePair" ]; do
|
||||
userId=$(echo "$rolePair" | cut -d: -f2)
|
||||
role=$(echo "$rolePair" | cut -d: -f1)
|
||||
echo "select '$role:' || ici.identifier " \
|
||||
"from identity_credential_identifiers ici, identity_credentials ic " \
|
||||
"where ici.identity_credential_id=ic.id and ic.identity_id = '$userId';" | \
|
||||
sqlite3 "$databasePath" >> "$rolesTmpFile"
|
||||
done < "$socRolesFile"
|
||||
|
||||
else
|
||||
echo "Database file or soc roles file does not exist yet, skipping users export"
|
||||
fi
|
||||
|
||||
if [[ -s "${usersTmpFile}" ]]; then
|
||||
mv "${usersTmpFile}" "${elasticUsersFile}"
|
||||
mv "${rolesTmpFile}" "${elasticRolesFile}"
|
||||
|
||||
if [[ -z "$SKIP_STATE_APPLY" ]]; then
|
||||
echo "Elastic state will be re-applied to affected minions. This may take several minutes..."
|
||||
echo "Applying elastic state to elastic minions at $(date)" >> /opt/so/log/soc/sync.log 2>&1
|
||||
salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-node or G@role:so-heavynode' state.apply elasticsearch queue=True >> /opt/so/log/soc/sync.log 2>&1
|
||||
fi
|
||||
else
|
||||
echo "Newly generated users/roles files are incomplete; aborting."
|
||||
fi
|
||||
}
|
||||
|
||||
function syncAll() {
|
||||
ensureRoleFileExists
|
||||
|
||||
# Check if a sync is needed. Sync is not needed if the following are true:
|
||||
# - user database entries are all older than the elastic users file
|
||||
# - soc roles file last modify date is older than the elastic roles file
|
||||
if [[ -z "$FORCE_SYNC" && -f "$databasePath" && -f "$elasticUsersFile" ]]; then
|
||||
usersFileAgeSecs=$(echo $(($(date +%s) - $(date +%s -r "$elasticUsersFile"))))
|
||||
staleCount=$(echo "select count(*) from identity_credentials where updated_at >= Datetime('now', '-${usersFileAgeSecs} seconds');" \
|
||||
| sqlite3 "$databasePath")
|
||||
if [[ "$staleCount" == "0" && "$elasticRolesFile" -nt "$socRolesFile" ]]; then
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
syncElastic
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
function listUsers() {
|
||||
response=$(curl -Ss -L ${kratosUrl}/identities)
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
|
||||
users=$(echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort)
|
||||
for user in $users; do
|
||||
roles=$(grep "$user" "$elasticRolesFile" | cut -d: -f1 | tr '\n' ' ')
|
||||
echo "$user: $roles"
|
||||
done
|
||||
}
|
||||
|
||||
function addUserRole() {
|
||||
email=$1
|
||||
role=$2
|
||||
|
||||
adjustUserRole "$email" "$role" "add"
|
||||
}
|
||||
|
||||
function deleteUserRole() {
|
||||
email=$1
|
||||
role=$2
|
||||
|
||||
adjustUserRole "$email" "$role" "del"
|
||||
}
|
||||
|
||||
function adjustUserRole() {
|
||||
email=$1
|
||||
role=$2
|
||||
op=$3
|
||||
|
||||
identityId=$(findIdByEmail "$email")
|
||||
[[ ${identityId} == "" ]] && fail "User not found"
|
||||
|
||||
ensureRoleFileExists
|
||||
|
||||
filename="$socRolesFile"
|
||||
hasRole=0
|
||||
grep "$role:" "$socRolesFile" | grep -q "$identityId" && hasRole=1
|
||||
if [[ "$op" == "add" ]]; then
|
||||
if [[ "$hasRole" == "1" ]]; then
|
||||
echo "User '$email' already has the role: $role"
|
||||
return 1
|
||||
else
|
||||
echo "$role:$identityId" >> "$filename"
|
||||
fi
|
||||
elif [[ "$op" == "del" ]]; then
|
||||
if [[ "$hasRole" -ne 1 ]]; then
|
||||
fail "User '$email' does not have the role: $role"
|
||||
else
|
||||
sed "/^$role:$identityId\$/d" "$filename" > "$filename.tmp"
|
||||
cat "$filename".tmp > "$filename"
|
||||
rm -f "$filename".tmp
|
||||
fi
|
||||
else
|
||||
fail "Unsupported role adjustment operation: $op"
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
function createUser() {
|
||||
email=$1
|
||||
role=$2
|
||||
|
||||
now=$(date -u +%FT%TZ)
|
||||
addUserJson=$(cat <<EOF
|
||||
@@ -142,16 +368,17 @@ EOF
|
||||
response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
identityId=$(echo "${response}" | jq ".id")
|
||||
if [[ ${identityId} == "null" ]]; then
|
||||
identityId=$(echo "${response}" | jq -r ".id")
|
||||
if [[ "${identityId}" == "null" ]]; then
|
||||
code=$(echo "${response}" | jq ".error.code")
|
||||
[[ "${code}" == "409" ]] && fail "User already exists"
|
||||
|
||||
reason=$(echo "${response}" | jq ".error.message")
|
||||
[[ $? == 0 ]] && fail "Unable to add user: ${reason}"
|
||||
else
|
||||
updatePassword "$identityId"
|
||||
addUserRole "$email" "$role"
|
||||
fi
|
||||
|
||||
updatePassword $identityId
|
||||
}
|
||||
|
||||
function updateStatus() {
|
||||
@@ -164,21 +391,21 @@ function updateStatus() {
|
||||
response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
|
||||
oldConfig=$(echo "select config from identity_credentials where identity_id='${identityId}';" | sqlite3 "$databasePath")
|
||||
if [[ "$status" == "locked" ]]; then
|
||||
config=$(echo $oldConfig | sed -e 's/hashed/locked/')
|
||||
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
|
||||
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to lock credential record"
|
||||
|
||||
echo "delete from sessions where identity_id=${identityId};" | sqlite3 "$databasePath"
|
||||
echo "delete from sessions where identity_id='${identityId}';" | sqlite3 "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to invalidate sessions"
|
||||
else
|
||||
config=$(echo $oldConfig | sed -e 's/locked/hashed/')
|
||||
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
|
||||
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
|
||||
[[ $? != 0 ]] && fail "Unable to unlock credential record"
|
||||
fi
|
||||
|
||||
updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
|
||||
updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url) | del(.created_at) | del(.updated_at)")
|
||||
response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson")
|
||||
[[ $? != 0 ]] && fail "Unable to mark user as locked"
|
||||
|
||||
@@ -190,7 +417,7 @@ function updateUser() {
|
||||
identityId=$(findIdByEmail "$email")
|
||||
[[ ${identityId} == "" ]] && fail "User not found"
|
||||
|
||||
updatePassword $identityId
|
||||
updatePassword "$identityId"
|
||||
}
|
||||
|
||||
function deleteUser() {
|
||||
@@ -201,6 +428,11 @@ function deleteUser() {
|
||||
|
||||
response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
rolesTmpFile="${socRolesFile}.tmp"
|
||||
createFile "$rolesTmpFile" "$soUID" "$soGID"
|
||||
grep -v "$id" "$socRolesFile" > "$rolesTmpFile"
|
||||
mv "$rolesTmpFile" "$socRolesFile"
|
||||
}
|
||||
|
||||
case "${operation}" in
|
||||
@@ -208,12 +440,14 @@ case "${operation}" in
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
lock
|
||||
validateEmail "$email"
|
||||
updatePassword
|
||||
createUser "$email"
|
||||
createUser "$email" "${role:-$DEFAULT_ROLE}"
|
||||
syncAll
|
||||
echo "Successfully added new user to SOC"
|
||||
check_container thehive && echo $password | so-thehive-user-add "$email"
|
||||
check_container fleet && echo $password | so-fleet-user-add "$email"
|
||||
check_container thehive && echo "$password" | so-thehive-user-add "$email"
|
||||
check_container fleet && echo "$password" | so-fleet-user-add "$email"
|
||||
;;
|
||||
|
||||
"list")
|
||||
@@ -221,11 +455,38 @@ case "${operation}" in
|
||||
listUsers
|
||||
;;
|
||||
|
||||
"addrole")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
[[ "$role" == "" ]] && fail "Role must be provided"
|
||||
|
||||
lock
|
||||
validateEmail "$email"
|
||||
if addUserRole "$email" "$role"; then
|
||||
syncElastic
|
||||
echo "Successfully added role to user"
|
||||
fi
|
||||
;;
|
||||
|
||||
"delrole")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
[[ "$role" == "" ]] && fail "Role must be provided"
|
||||
|
||||
lock
|
||||
validateEmail "$email"
|
||||
deleteUserRole "$email" "$role"
|
||||
syncElastic
|
||||
echo "Successfully removed role from user"
|
||||
;;
|
||||
|
||||
"update")
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
lock
|
||||
updateUser "$email"
|
||||
syncAll
|
||||
echo "Successfully updated user"
|
||||
;;
|
||||
|
||||
@@ -233,7 +494,9 @@ case "${operation}" in
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
lock
|
||||
updateStatus "$email" 'active'
|
||||
syncAll
|
||||
echo "Successfully enabled user"
|
||||
check_container thehive && so-thehive-user-enable "$email" true
|
||||
check_container fleet && so-fleet-user-enable "$email" true
|
||||
@@ -243,7 +506,9 @@ case "${operation}" in
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
lock
|
||||
updateStatus "$email" 'locked'
|
||||
syncAll
|
||||
echo "Successfully disabled user"
|
||||
check_container thehive && so-thehive-user-enable "$email" false
|
||||
check_container fleet && so-fleet-user-enable "$email" false
|
||||
@@ -253,12 +518,19 @@ case "${operation}" in
|
||||
verifyEnvironment
|
||||
[[ "$email" == "" ]] && fail "Email address must be provided"
|
||||
|
||||
lock
|
||||
deleteUser "$email"
|
||||
syncAll
|
||||
echo "Successfully deleted user"
|
||||
check_container thehive && so-thehive-user-enable "$email" false
|
||||
check_container fleet && so-fleet-user-enable "$email" false
|
||||
;;
|
||||
|
||||
"sync")
|
||||
lock
|
||||
syncAll
|
||||
;;
|
||||
|
||||
"validate")
|
||||
validateEmail "$email"
|
||||
updatePassword
|
||||
@@ -280,4 +552,4 @@ case "${operation}" in
|
||||
;;
|
||||
esac
|
||||
|
||||
exit 0
|
||||
exit 0
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
@@ -20,13 +19,8 @@ echo "Starting to check for yara rule updates at $(date)..."
|
||||
|
||||
output_dir="/opt/so/saltstack/default/salt/strelka/rules"
|
||||
mkdir -p $output_dir
|
||||
|
||||
repos="$output_dir/repos.txt"
|
||||
ignorefile="$output_dir/ignore.txt"
|
||||
|
||||
deletecounter=0
|
||||
newcounter=0
|
||||
updatecounter=0
|
||||
|
||||
{% if ISAIRGAP is sameas true %}
|
||||
|
||||
@@ -35,58 +29,21 @@ echo "Airgap mode enabled."
|
||||
clone_dir="/nsm/repo/rules/strelka"
|
||||
repo_name="signature-base"
|
||||
mkdir -p /opt/so/saltstack/default/salt/strelka/rules/signature-base
|
||||
|
||||
# Ensure a copy of the license is available for the rules
|
||||
[ -f $clone_dir/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
|
||||
|
||||
# Copy over rules
|
||||
for i in $(find $clone_dir/yara -name "*.yar*"); do
|
||||
rule_name=$(echo $i | awk -F '/' '{print $NF}')
|
||||
repo_sum=$(sha256sum $i | awk '{print $1}')
|
||||
|
||||
# Check rules against those in ignore list -- don't copy if ignored.
|
||||
if ! grep -iq $rule_name $ignorefile; then
|
||||
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
|
||||
|
||||
# For existing rules, check to see if they need to be updated, by comparing checksums
|
||||
if [ $existing_rules -gt 0 ];then
|
||||
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
|
||||
if [ "$repo_sum" != "$local_sum" ]; then
|
||||
echo "Checksums do not match!"
|
||||
echo "Updating $rule_name..."
|
||||
cp $i $output_dir/$repo_name;
|
||||
((updatecounter++))
|
||||
fi
|
||||
else
|
||||
# If rule doesn't exist already, we'll add it
|
||||
echo "Adding new rule: $rule_name..."
|
||||
cp $i $output_dir/$repo_name
|
||||
((newcounter++))
|
||||
fi
|
||||
fi;
|
||||
done
|
||||
|
||||
# Check to see if we have any old rules that need to be removed
|
||||
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
|
||||
is_repo_rule=$(find $clone_dir -name "$i" | wc -l)
|
||||
if [ $is_repo_rule -eq 0 ]; then
|
||||
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
|
||||
rm $output_dir/$repo_name/$i
|
||||
((deletecounter++))
|
||||
fi
|
||||
echo "Adding rule: $rule_name..."
|
||||
cp $i $output_dir/$repo_name
|
||||
((newcounter++))
|
||||
done
|
||||
|
||||
echo "Done!"
|
||||
|
||||
if [ "$newcounter" -gt 0 ];then
|
||||
echo "$newcounter new rules added."
|
||||
fi
|
||||
|
||||
if [ "$updatecounter" -gt 0 ];then
|
||||
echo "$updatecounter rules updated."
|
||||
fi
|
||||
|
||||
if [ "$deletecounter" -gt 0 ];then
|
||||
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
|
||||
echo "$newcounter rules added."
|
||||
fi
|
||||
|
||||
{% else %}
|
||||
@@ -99,69 +56,32 @@ if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then
|
||||
if ! $(echo "$repo" | grep -qE '^#'); then
|
||||
# Remove old repo if existing bc of previous error condition or unexpected disruption
|
||||
repo_name=`echo $repo | awk -F '/' '{print $NF}'`
|
||||
[ -d $repo_name ] && rm -rf $repo_name
|
||||
[ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name
|
||||
|
||||
# Clone repo and make appropriate directories for rules
|
||||
|
||||
git clone $repo $clone_dir/$repo_name
|
||||
echo "Analyzing rules from $clone_dir/$repo_name..."
|
||||
mkdir -p $output_dir/$repo_name
|
||||
# Ensure a copy of the license is available for the rules
|
||||
[ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name
|
||||
|
||||
# Copy over rules
|
||||
for i in $(find $clone_dir/$repo_name -name "*.yar*"); do
|
||||
rule_name=$(echo $i | awk -F '/' '{print $NF}')
|
||||
repo_sum=$(sha256sum $i | awk '{print $1}')
|
||||
|
||||
# Check rules against those in ignore list -- don't copy if ignored.
|
||||
if ! grep -iq $rule_name $ignorefile; then
|
||||
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)
|
||||
|
||||
# For existing rules, check to see if they need to be updated, by comparing checksums
|
||||
if [ $existing_rules -gt 0 ];then
|
||||
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
|
||||
if [ "$repo_sum" != "$local_sum" ]; then
|
||||
echo "Checksums do not match!"
|
||||
echo "Updating $rule_name..."
|
||||
cp $i $output_dir/$repo_name;
|
||||
((updatecounter++))
|
||||
fi
|
||||
else
|
||||
# If rule doesn't exist already, we'll add it
|
||||
echo "Adding new rule: $rule_name..."
|
||||
cp $i $output_dir/$repo_name
|
||||
((newcounter++))
|
||||
fi
|
||||
fi;
|
||||
done
|
||||
|
||||
# Check to see if we have any old rules that need to be removed
|
||||
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
|
||||
is_repo_rule=$(find $clone_dir/$repo_name -name "$i" | wc -l)
|
||||
if [ $is_repo_rule -eq 0 ]; then
|
||||
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
|
||||
rm $output_dir/$repo_name/$i
|
||||
((deletecounter++))
|
||||
fi
|
||||
done
|
||||
rm -rf $clone_dir/$repo_name
|
||||
fi
|
||||
done < $repos
|
||||
echo "Adding rule: $rule_name..."
|
||||
cp $i $output_dir/$repo_name
|
||||
((newcounter++))
|
||||
done
|
||||
rm -rf $clone_dir/$repo_name
|
||||
fi
|
||||
done < $repos
|
||||
|
||||
echo "Done!"
|
||||
|
||||
|
||||
if [ "$newcounter" -gt 0 ];then
|
||||
echo "$newcounter new rules added."
|
||||
echo "$newcounter rules added."
|
||||
fi
|
||||
|
||||
if [ "$updatecounter" -gt 0 ];then
|
||||
echo "$updatecounter rules updated."
|
||||
fi
|
||||
|
||||
if [ "$deletecounter" -gt 0 ];then
|
||||
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
|
||||
fi
|
||||
|
||||
|
||||
else
|
||||
echo "Server returned $gh_status status code."
|
||||
echo "No connectivity to Github...exiting..."
|
||||
|
||||
@@ -10,11 +10,10 @@ zeek_logs_enabled() {
|
||||
}
|
||||
|
||||
whiptail_manager_adv_service_zeeklogs() {
|
||||
BLOGS=$(whiptail --title "Security Onion Setup" --checklist "Please Select Logs to Send:" 24 78 12 \
|
||||
BLOGS=$(whiptail --title "so-zeek-logs" --checklist "Please Select Logs to Send:" 24 78 12 \
|
||||
"conn" "Connection Logging" ON \
|
||||
"dce_rpc" "RPC Logs" ON \
|
||||
"dhcp" "DHCP Logs" ON \
|
||||
"dhcpv6" "DHCP IPv6 Logs" ON \
|
||||
"dnp3" "DNP3 Logs" ON \
|
||||
"dns" "DNS Logs" ON \
|
||||
"dpd" "DPD Logs" ON \
|
||||
@@ -25,25 +24,20 @@ whiptail_manager_adv_service_zeeklogs() {
|
||||
"irc" "IRC Chat Logs" ON \
|
||||
"kerberos" "Kerberos Logs" ON \
|
||||
"modbus" "MODBUS Logs" ON \
|
||||
"mqtt" "MQTT Logs" ON \
|
||||
"notice" "Zeek Notice Logs" ON \
|
||||
"ntlm" "NTLM Logs" ON \
|
||||
"openvpn" "OPENVPN Logs" ON \
|
||||
"pe" "PE Logs" ON \
|
||||
"radius" "Radius Logs" ON \
|
||||
"rfb" "RFB Logs" ON \
|
||||
"rdp" "RDP Logs" ON \
|
||||
"signatures" "Signatures Logs" ON \
|
||||
"sip" "SIP Logs" ON \
|
||||
"smb_files" "SMB Files Logs" ON \
|
||||
"smb_mapping" "SMB Mapping Logs" ON \
|
||||
"smtp" "SMTP Logs" ON \
|
||||
"snmp" "SNMP Logs" ON \
|
||||
"software" "Software Logs" ON \
|
||||
"ssh" "SSH Logs" ON \
|
||||
"ssl" "SSL Logs" ON \
|
||||
"syslog" "Syslog Logs" ON \
|
||||
"telnet" "Telnet Logs" ON \
|
||||
"tunnel" "Tunnel Logs" ON \
|
||||
"weird" "Zeek Weird Logs" ON \
|
||||
"mysql" "MySQL Logs" ON \
|
||||
@@ -61,10 +55,10 @@ whiptail_manager_adv_service_zeeklogs
|
||||
return_code=$?
|
||||
case $return_code in
|
||||
1)
|
||||
whiptail --title "Security Onion Setup" --msgbox "Cancelling. No changes have been made." 8 75
|
||||
whiptail --title "so-zeek-logs" --msgbox "Cancelling. No changes have been made." 8 75
|
||||
;;
|
||||
255)
|
||||
whiptail --title "Security Onion Setup" --msgbox "Whiptail error occured, exiting." 8 75
|
||||
whiptail --title "so-zeek-logs" --msgbox "Whiptail error occured, exiting." 8 75
|
||||
;;
|
||||
*)
|
||||
zeek_logs_enabled
|
||||
|
||||
@@ -24,11 +24,11 @@ show_stats() {
|
||||
echo
|
||||
echo "Average throughput:"
|
||||
echo
|
||||
docker exec so-zeek env -i PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:/usr/local/bin:/usr/local/sbin runuser -l zeek -c '/opt/zeek/bin/zeekctl capstats'
|
||||
docker exec so-zeek env -i PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:/usr/local/bin:/usr/local/sbin /opt/zeek/bin/zeekctl capstats
|
||||
echo
|
||||
echo "Average packet loss:"
|
||||
echo
|
||||
docker exec so-zeek env -i PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:/usr/local/bin:/usr/local/sbin runuser -l zeek -c '/opt/zeek/bin/zeekctl netstats'
|
||||
docker exec so-zeek env -i PATH=/bin:/usr/bin:/sbin:/usr/sbin:/opt/bin:/usr/local/bin:/usr/local/sbin /opt/zeek/bin/zeekctl netstats
|
||||
echo
|
||||
}
|
||||
|
||||
|
||||
@@ -18,13 +18,83 @@
|
||||
. /usr/sbin/so-common
|
||||
|
||||
UPDATE_DIR=/tmp/sogh/securityonion
|
||||
DEFAULT_SALT_DIR=/opt/so/saltstack/default
|
||||
INSTALLEDVERSION=$(cat /etc/soversion)
|
||||
POSTVERSION=$INSTALLEDVERSION
|
||||
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
|
||||
DEFAULT_SALT_DIR=/opt/so/saltstack/default
|
||||
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk '{print $2}')
|
||||
BATCHSIZE=5
|
||||
SOUP_LOG=/root/soup.log
|
||||
INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
|
||||
WHATWOULDYOUSAYYAHDOHERE=soup
|
||||
whiptail_title='Security Onion UPdater'
|
||||
NOTIFYCUSTOMELASTICCONFIG=false
|
||||
|
||||
check_err() {
|
||||
local exit_code=$1
|
||||
local err_msg="Unhandled error occured, please check $SOUP_LOG for details."
|
||||
|
||||
[[ $ERR_HANDLED == true ]] && exit $exit_code
|
||||
if [[ $exit_code -ne 0 ]]; then
|
||||
printf '%s' "Soup failed with error $exit_code: "
|
||||
case $exit_code in
|
||||
2)
|
||||
echo 'No such file or directory'
|
||||
;;
|
||||
5)
|
||||
echo 'Interrupted system call'
|
||||
;;
|
||||
12)
|
||||
echo 'Out of memory'
|
||||
;;
|
||||
28)
|
||||
echo 'No space left on device'
|
||||
echo 'Likely ran out of space on disk, please review hardware requirements for Security Onion: https://docs.securityonion.net/en/2.3/hardware.html'
|
||||
;;
|
||||
30)
|
||||
echo 'Read-only file system'
|
||||
;;
|
||||
35)
|
||||
echo 'Resource temporarily unavailable'
|
||||
;;
|
||||
64)
|
||||
echo 'Machine is not on the network'
|
||||
;;
|
||||
67)
|
||||
echo 'Link has been severed'
|
||||
;;
|
||||
100)
|
||||
echo 'Network is down'
|
||||
;;
|
||||
101)
|
||||
echo 'Network is unreachable'
|
||||
;;
|
||||
102)
|
||||
echo 'Network reset'
|
||||
;;
|
||||
110)
|
||||
echo 'Connection timed out'
|
||||
;;
|
||||
111)
|
||||
echo 'Connection refused'
|
||||
;;
|
||||
112)
|
||||
echo 'Host is down'
|
||||
;;
|
||||
113)
|
||||
echo 'No route to host'
|
||||
;;
|
||||
*)
|
||||
echo 'Unhandled error'
|
||||
echo "$err_msg"
|
||||
;;
|
||||
esac
|
||||
if [[ $exit_code -ge 64 && $exit_code -le 113 ]]; then
|
||||
echo "$err_msg"
|
||||
fi
|
||||
exit $exit_code
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
add_common() {
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
@@ -36,19 +106,21 @@ add_common() {
|
||||
|
||||
airgap_mounted() {
|
||||
# Let's see if the ISO is already mounted.
|
||||
if [ -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
|
||||
if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
|
||||
echo "The ISO is already mounted"
|
||||
else
|
||||
echo ""
|
||||
echo "Looks like we need access to the upgrade content"
|
||||
echo ""
|
||||
echo "If you just copied the .iso file over you can specify the path."
|
||||
echo "If you burned the ISO to a disk the standard way you can specify the device."
|
||||
echo "Example: /home/user/securityonion-2.X.0.iso"
|
||||
echo "Example: /dev/sdx1"
|
||||
echo ""
|
||||
read -p 'Enter the location of the iso: ' ISOLOC
|
||||
if [ -f $ISOLOC ]; then
|
||||
if [[ -z $ISOLOC ]]; then
|
||||
echo "This is airgap. Ask for a location."
|
||||
echo ""
|
||||
cat << EOF
|
||||
In order for soup to proceed, you must provide the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media.
|
||||
For example, if you have copied the new Security Onion ISO file to your home directory, then the path might look like /home/myuser/securityonion-2.x.y.iso.
|
||||
Or, if you have burned the new ISO onto an optical disk then the path might look like /dev/cdrom.
|
||||
|
||||
EOF
|
||||
read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
|
||||
fi
|
||||
if [[ -f $ISOLOC ]]; then
|
||||
# Mounting the ISO image
|
||||
mkdir -p /tmp/soagupdate
|
||||
mount -t iso9660 -o loop $ISOLOC /tmp/soagupdate
|
||||
@@ -60,10 +132,10 @@ airgap_mounted() {
|
||||
else
|
||||
echo "ISO has been mounted!"
|
||||
fi
|
||||
elif [ -f $ISOLOC/SecurityOnion/VERSION ]; then
|
||||
elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
|
||||
ln -s $ISOLOC /tmp/soagupdate
|
||||
echo "Found the update content"
|
||||
else
|
||||
elif [[ -b $ISOLOC ]]; then
|
||||
mkdir -p /tmp/soagupdate
|
||||
mount $ISOLOC /tmp/soagupdate
|
||||
if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
|
||||
@@ -72,25 +144,29 @@ airgap_mounted() {
|
||||
exit 0
|
||||
else
|
||||
echo "Device has been mounted!"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
echo "Could not find Security Onion ISO content at ${ISOLOC}"
|
||||
echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
airgap_update_dockers() {
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
if [[ $is_airgap -eq 0 ]]; then
|
||||
# Let's copy the tarball
|
||||
if [ ! -f $AGDOCKER/registry.tar ]; then
|
||||
if [[ ! -f $AGDOCKER/registry.tar ]]; then
|
||||
echo "Unable to locate registry. Exiting"
|
||||
exit 1
|
||||
exit 0
|
||||
else
|
||||
echo "Stopping the registry docker"
|
||||
docker stop so-dockerregistry
|
||||
docker rm so-dockerregistry
|
||||
echo "Copying the new dockers over"
|
||||
tar xvf $AGDOCKER/registry.tar -C /nsm/docker-registry/docker
|
||||
tar xvf "$AGDOCKER/registry.tar" -C /nsm/docker-registry/docker
|
||||
echo "Add Registry back"
|
||||
docker load -i $AGDOCKER/registry_image.tar
|
||||
docker load -i "$AGDOCKER/registry_image.tar"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
@@ -101,10 +177,54 @@ update_registry() {
|
||||
salt-call state.apply registry queue=True
|
||||
}
|
||||
|
||||
check_airgap() {
|
||||
# See if this is an airgap install
|
||||
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap: | awk '{print $2}')
|
||||
if [[ "$AIRGAP" == "True" ]]; then
|
||||
is_airgap=0
|
||||
UPDATE_DIR=/tmp/soagupdate/SecurityOnion
|
||||
AGDOCKER=/tmp/soagupdate/docker
|
||||
AGREPO=/tmp/soagupdate/Packages
|
||||
else
|
||||
is_airgap=1
|
||||
fi
|
||||
}
|
||||
|
||||
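check_airgap() keys off a single pillar line, so the entry it expects in /opt/so/saltstack/local/pillar/global.sls looks roughly like this (nesting under the global key is assumed; only the "airgap:" line matters to the grep):

    global:
      airgap: True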
# {% raw %}
|
||||
|
||||
check_local_mods() {
|
||||
local salt_local=/opt/so/saltstack/local
|
||||
|
||||
local_mod_arr=()
|
||||
|
||||
while IFS= read -r -d '' local_file; do
|
||||
stripped_path=${local_file#"$salt_local"}
|
||||
default_file="${DEFAULT_SALT_DIR}${stripped_path}"
|
||||
if [[ -f $default_file ]]; then
|
||||
file_diff=$(diff "$default_file" "$local_file" )
|
||||
if [[ $(echo "$file_diff" | grep -c "^<") -gt 0 ]]; then
|
||||
local_mod_arr+=( "$local_file" )
|
||||
fi
|
||||
fi
|
||||
done< <(find $salt_local -type f -print0)
|
||||
|
||||
if [[ ${#local_mod_arr} -gt 0 ]]; then
|
||||
echo "Potentially breaking changes found in the following files (check ${DEFAULT_SALT_DIR} for original copy):"
|
||||
for file_str in "${local_mod_arr[@]}"; do
|
||||
echo " $file_str"
|
||||
done
|
||||
echo ""
|
||||
echo "To reference this list later, check $SOUP_LOG"
|
||||
sleep 10
|
||||
fi
|
||||
}
|
||||
|
||||
# {% endraw %}
|
||||
|
||||
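check_local_mods() only reports which local files diverge; to inspect a reported file by hand, diff it against the default copy the same way the function does (the file name below is hypothetical):

    diff /opt/so/saltstack/default/salt/idstools/defaults.yaml \
         /opt/so/saltstack/local/salt/idstools/defaults.yaml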
check_sudoers() {
|
||||
if grep -q "so-setup" /etc/sudoers; then
|
||||
echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
|
||||
fi
|
||||
if grep -q "so-setup" /etc/sudoers; then
|
||||
echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
|
||||
fi
|
||||
}
|
||||
|
||||
check_log_size_limit() {
|
||||
@@ -170,23 +290,31 @@ check_os_updates() {
|
||||
OSUPDATES=$(yum -q list updates | wc -l)
|
||||
fi
|
||||
if [[ "$OSUPDATES" -gt 0 ]]; then
|
||||
echo $NEEDUPDATES
|
||||
echo ""
|
||||
read -p "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
|
||||
|
||||
if [[ "$confirm" == [cC] ]]; then
|
||||
if [[ -z $UNATTENDED ]]; then
|
||||
echo "$NEEDUPDATES"
|
||||
echo ""
|
||||
read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
|
||||
if [[ "$confirm" == [cC] ]]; then
|
||||
echo "Continuing without updating packages"
|
||||
elif [[ "$confirm" == [uU] ]]; then
|
||||
elif [[ "$confirm" == [uU] ]]; then
|
||||
echo "Applying Grid Updates"
|
||||
salt \* -b 5 state.apply patch.os queue=True
|
||||
else
|
||||
update_flag=true
|
||||
else
|
||||
echo "Exiting soup"
|
||||
exit 0
|
||||
fi
|
||||
else
|
||||
update_flag=true
|
||||
fi
|
||||
else
|
||||
echo "Looks like you have an updated OS"
|
||||
echo "Looks like you have an updated OS"
|
||||
fi
|
||||
|
||||
if [[ $update_flag == true ]]; then
|
||||
set +e
|
||||
run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
|
||||
set -e
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
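The batch flag limits how many minions apply the patch.os state at once; the hard-coded 5 here mirrors the default BATCHSIZE defined near the top of soup for batched operations. On a larger grid a bigger batch might look like this (the value 10 is only an example):

    salt '*' -b 10 state.apply patch.os queue=True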
clean_dockers() {
|
||||
@@ -214,21 +342,11 @@ clone_to_tmp() {
|
||||
fi
|
||||
}
|
||||
|
||||
copy_new_files() {
|
||||
# Copy new files over to the salt dir
|
||||
cd $UPDATE_DIR
|
||||
rsync -a salt $DEFAULT_SALT_DIR/
|
||||
rsync -a pillar $DEFAULT_SALT_DIR/
|
||||
chown -R socore:socore $DEFAULT_SALT_DIR/
|
||||
chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh
|
||||
cd /tmp
|
||||
}
|
||||
|
||||
generate_and_clean_tarballs() {
|
||||
local new_version
|
||||
new_version=$(cat $UPDATE_DIR/VERSION)
|
||||
[ -d /opt/so/repo ] || mkdir -p /opt/so/repo
|
||||
tar -czf "/opt/so/repo/$new_version.tar.gz" "$UPDATE_DIR"
|
||||
tar -czf "/opt/so/repo/$new_version.tar.gz" -C "$UPDATE_DIR" .
|
||||
find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \;
|
||||
}
|
||||
|
||||
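With the -C change above, the archive contents are rooted at the top of the update tree instead of carrying the full /tmp path. A quick sanity check of the result (version number is illustrative):

    tar -tzf /opt/so/repo/2.3.80.tar.gz | head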
@@ -258,13 +376,6 @@ masterunlock() {
|
||||
fi
|
||||
}
|
||||
|
||||
preupgrade_changes_2.3.50_repo() {
|
||||
# We made repo changes in 2.3.50 and this prepares for that on upgrade
|
||||
echo "Checking to see if 2.3.50 repo changes are needed."
|
||||
|
||||
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50_repo
|
||||
}
|
||||
|
||||
preupgrade_changes() {
|
||||
# This function is to add any new pillar items if needed.
|
||||
echo "Checking to see if changes are needed."
|
||||
@@ -275,6 +386,8 @@ preupgrade_changes() {
|
||||
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20
|
||||
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
|
||||
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50
|
||||
[[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_2.3.5X_to_2.3.80
|
||||
true
|
||||
}
|
||||
|
||||
postupgrade_changes() {
|
||||
@@ -284,6 +397,8 @@ postupgrade_changes() {
|
||||
[[ "$POSTVERSION" =~ rc.1 ]] && post_rc1_to_rc2
|
||||
[[ "$POSTVERSION" == 2.3.20 || "$POSTVERSION" == 2.3.21 ]] && post_2.3.2X_to_2.3.30
|
||||
[[ "$POSTVERSION" == 2.3.30 ]] && post_2.3.30_to_2.3.40
|
||||
[[ "$POSTVERSION" == 2.3.50 ]] && post_2.3.5X_to_2.3.60
|
||||
true
|
||||
}
|
||||
|
||||
post_rc1_to_2.3.21() {
|
||||
@@ -304,6 +419,10 @@ post_2.3.30_to_2.3.40() {
|
||||
POSTVERSION=2.3.40
|
||||
}
|
||||
|
||||
post_2.3.5X_to_2.3.60() {
|
||||
POSTVERSION=2.3.60
|
||||
}
|
||||
|
||||
|
||||
rc1_to_rc2() {
|
||||
|
||||
@@ -437,7 +556,7 @@ up_2.3.2X_to_2.3.30() {
|
||||
sed -i "/ imagerepo: securityonion/c\ imagerepo: 'security-onion-solutions'" /opt/so/saltstack/local/pillar/global.sls
|
||||
|
||||
# Strelka rule repo pillar addition
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
if [[ $is_airgap -eq 0 ]]; then
|
||||
# Add manager as default Strelka YARA rule repo
|
||||
sed -i "/^strelka:/a \\ repos: \n - https://$HOSTNAME/repo/rules/strelka" /opt/so/saltstack/local/pillar/global.sls;
|
||||
else
|
||||
@@ -448,8 +567,8 @@ up_2.3.2X_to_2.3.30() {
|
||||
INSTALLEDVERSION=2.3.30
|
||||
}
|
||||
|
||||
up_2.3.3X_to_2.3.50_repo() {
|
||||
echo "Performing 2.3.50 repo actions."
|
||||
upgrade_to_2.3.50_repo() {
|
||||
echo "Performing repo changes."
|
||||
if [[ "$OS" == "centos" ]]; then
|
||||
# Import GPG Keys
|
||||
gpg_rpm_import
|
||||
@@ -464,7 +583,7 @@ up_2.3.3X_to_2.3.50_repo() {
|
||||
rm -f "/etc/yum.repos.d/$DELREPO.repo"
|
||||
fi
|
||||
done
|
||||
if [ $is_airgap -eq 1 ]; then
|
||||
if [[ $is_airgap -eq 1 ]]; then
|
||||
# Copy the new repo file if not airgap
|
||||
cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
|
||||
yum clean all
|
||||
@@ -505,6 +624,46 @@ EOF
|
||||
INSTALLEDVERSION=2.3.50
|
||||
}
|
||||
|
||||
up_2.3.5X_to_2.3.80() {
|
||||
|
||||
# Remove watermark settings from global.sls
|
||||
sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls
|
||||
|
||||
# Add new indices to the global
|
||||
sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
|
||||
sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
|
||||
sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
|
||||
sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
|
||||
|
||||
# Do some pillar formatting
|
||||
tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print tolower($2)'}| xargs)
|
||||
|
||||
if [[ "$tc" == "true" ]]; then
|
||||
tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print $2'})
|
||||
sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls
|
||||
sed -i '/ true_cluster_name/d' /opt/so/saltstack/local/pillar/global.sls
|
||||
sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls
|
||||
|
||||
for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
|
||||
if [[ ${file} != *"manager.sls"* ]]; then
|
||||
noderoutetype=$(grep -w node_route_type $file | awk -F: {'print $2'})
|
||||
if [ -n "$noderoutetype" ]; then
|
||||
sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file
|
||||
sed -i '/ node_route_type/d' $file
|
||||
noderoutetype=''
|
||||
fi
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar
|
||||
if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then
|
||||
NOTIFYCUSTOMELASTICCONFIG=true
|
||||
fi
|
||||
|
||||
INSTALLEDVERSION=2.3.80
|
||||
}
|
||||
|
||||
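After the sed insertions above, each added index stanza in /opt/so/saltstack/local/pillar/global.sls should look roughly like this (values taken from the sed lines; surrounding keys and exact indentation assumed):

    index_settings:
      so-redis:
        shards: 1
        warm: 7
        close: 30
        delete: 365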
verify_upgradespace() {
|
||||
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
|
||||
if [ "$CURRENTSPACE" -lt "10" ]; then
|
||||
@@ -520,7 +679,7 @@ upgrade_space() {
|
||||
clean_dockers
|
||||
if ! verify_upgradespace; then
|
||||
echo "There is not enough space to perform the upgrade. Please free up space and try again"
|
||||
exit 1
|
||||
exit 0
|
||||
fi
|
||||
else
|
||||
echo "You have enough space for upgrade. Proceeding with soup."
|
||||
@@ -545,8 +704,8 @@ thehive_maint() {
|
||||
done
|
||||
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
|
||||
echo "Migrating thehive databases if needed."
|
||||
curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate"
|
||||
curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate"
|
||||
curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1
|
||||
curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -572,26 +731,39 @@ update_version() {
|
||||
# Update the version to the latest
|
||||
echo "Updating the Security Onion version file."
|
||||
echo $NEWVERSION > /etc/soversion
|
||||
echo $HOTFIXVERSION > /etc/sohotfix
|
||||
sed -i "/ soversion:/c\ soversion: $NEWVERSION" /opt/so/saltstack/local/pillar/global.sls
|
||||
}
|
||||
|
||||
upgrade_check() {
|
||||
# Let's make sure we actually need to update.
|
||||
NEWVERSION=$(cat $UPDATE_DIR/VERSION)
|
||||
HOTFIXVERSION=$(cat $UPDATE_DIR/HOTFIX)
|
||||
[[ -f /etc/sohotfix ]] && CURRENTHOTFIX=$(cat /etc/sohotfix)
|
||||
if [ "$INSTALLEDVERSION" == "$NEWVERSION" ]; then
|
||||
echo "You are already running the latest version of Security Onion."
|
||||
exit 0
|
||||
echo "Checking to see if there are hotfixes needed"
|
||||
if [ "$HOTFIXVERSION" == "$CURRENTHOTFIX" ]; then
|
||||
echo "You are already running the latest version of Security Onion."
|
||||
exit 0
|
||||
else
|
||||
echo "We need to apply a hotfix"
|
||||
is_hotfix=true
|
||||
fi
|
||||
else
|
||||
is_hotfix=false
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
upgrade_check_salt() {
|
||||
NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk {'print $2'})
|
||||
NEWSALTVERSION=$(grep version: $UPDATE_DIR/salt/salt/master.defaults.yaml | awk '{print $2}')
|
||||
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
|
||||
echo "You are already running the correct version of Salt for Security Onion."
|
||||
else
|
||||
UPGRADESALT=1
|
||||
fi
|
||||
}
|
||||
|
||||
upgrade_salt() {
|
||||
SALTUPGRADED=True
|
||||
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
|
||||
@@ -603,7 +775,11 @@ upgrade_salt() {
|
||||
yum versionlock delete "salt-*"
|
||||
echo "Updating Salt packages and restarting services."
|
||||
echo ""
|
||||
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
|
||||
set +e
|
||||
run_check_net_err \
|
||||
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
|
||||
"Could not update salt, please check $SOUP_LOG for details."
|
||||
set -e
|
||||
echo "Applying yum versionlock for Salt."
|
||||
echo ""
|
||||
yum versionlock add "salt-*"
|
||||
@@ -616,7 +792,11 @@ upgrade_salt() {
|
||||
apt-mark unhold "salt-minion"
|
||||
echo "Updating Salt packages and restarting services."
|
||||
echo ""
|
||||
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
|
||||
set +e
|
||||
run_check_net_err \
|
||||
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable \"$NEWSALTVERSION\"" \
|
||||
"Could not update salt, please check $SOUP_LOG for details."
|
||||
set -e
|
||||
echo "Applying apt hold for Salt."
|
||||
echo ""
|
||||
apt-mark hold "salt-common"
|
||||
@@ -641,213 +821,244 @@ verify_latest_update_script() {
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
salt-call state.apply common queue=True
|
||||
salt-call state.apply -l info common queue=True
|
||||
echo ""
|
||||
echo "soup has been updated. Please run soup again."
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
|
||||
main () {
|
||||
echo "### Preparing soup at `date` ###"
|
||||
while getopts ":b" opt; do
|
||||
case "$opt" in
|
||||
b ) # process option b
|
||||
shift
|
||||
BATCHSIZE=$1
|
||||
if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then
|
||||
echo "Batch size must be a number greater than 0."
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
\? )
|
||||
echo "Usage: cmd [-b]"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
echo "Checking to see if this is a manager."
|
||||
echo ""
|
||||
require_manager
|
||||
set_minionid
|
||||
echo "Checking to see if this is an airgap install"
|
||||
echo ""
|
||||
check_airgap
|
||||
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
|
||||
echo ""
|
||||
set_os
|
||||
set_palette
|
||||
check_elastic_license
|
||||
echo ""
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
# Let's mount the ISO since this is airgap
|
||||
airgap_mounted
|
||||
else
|
||||
echo "Cloning Security Onion github repo into $UPDATE_DIR."
|
||||
echo "Removing previous upgrade sources."
|
||||
rm -rf $UPDATE_DIR
|
||||
clone_to_tmp
|
||||
fi
|
||||
check_os_updates
|
||||
echo ""
|
||||
echo "Verifying we have the latest soup script."
|
||||
verify_latest_update_script
|
||||
echo ""
|
||||
|
||||
echo "Generating new repo archive"
|
||||
generate_and_clean_tarballs
|
||||
if [ -f /usr/sbin/so-image-common ]; then
|
||||
. /usr/sbin/so-image-common
|
||||
else
|
||||
add_common
|
||||
fi
|
||||
|
||||
echo "Let's see if we need to update Security Onion."
|
||||
upgrade_check
|
||||
upgrade_space
|
||||
|
||||
echo "Checking for Salt Master and Minion updates."
|
||||
upgrade_check_salt
|
||||
|
||||
echo ""
|
||||
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
|
||||
echo ""
|
||||
echo "Updating dockers to $NEWVERSION."
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
airgap_update_dockers
|
||||
update_centos_repo
|
||||
yum clean all
|
||||
check_os_updates
|
||||
else
|
||||
update_registry
|
||||
update_docker_containers "soup"
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Stopping Salt Minion service."
|
||||
systemctl stop salt-minion
|
||||
echo "Killing any remaining Salt Minion processes."
|
||||
pkill -9 -ef /usr/bin/salt-minion
|
||||
echo ""
|
||||
echo "Stopping Salt Master service."
|
||||
systemctl stop salt-master
|
||||
echo ""
|
||||
|
||||
preupgrade_changes_2.3.50_repo
|
||||
|
||||
# Does Salt need to be upgraded? If so, update it.
|
||||
if [ "$UPGRADESALT" == "1" ]; then
|
||||
echo "Upgrading Salt"
|
||||
# Update the repo files so it can actually upgrade
|
||||
upgrade_salt
|
||||
fi
|
||||
|
||||
echo "Checking if Salt was upgraded."
|
||||
echo ""
|
||||
# Check that Salt was upgraded
|
||||
SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk {'print $2'})
|
||||
if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
|
||||
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
|
||||
echo "Once the issue is resolved, run soup again."
|
||||
echo "Exiting."
|
||||
main() {
|
||||
trap 'check_err $?' EXIT
|
||||
|
||||
echo "Checking to see if this is an airgap install."
|
||||
echo ""
|
||||
exit 1
|
||||
else
|
||||
echo "Salt upgrade success."
|
||||
echo ""
|
||||
fi
|
||||
|
||||
preupgrade_changes
|
||||
echo ""
|
||||
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
echo "Updating Rule Files to the Latest."
|
||||
update_airgap_rules
|
||||
fi
|
||||
|
||||
# Only update the repo if this is an airgap install
|
||||
if [[ $is_airgap -eq 0 ]] && [[ "$UPGRADESALT" != "1" ]]; then
|
||||
update_centos_repo
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
|
||||
copy_new_files
|
||||
echo ""
|
||||
update_version
|
||||
|
||||
echo ""
|
||||
echo "Locking down Salt Master for upgrade"
|
||||
masterlock
|
||||
|
||||
echo ""
|
||||
echo "Starting Salt Master service."
|
||||
systemctl start salt-master
|
||||
|
||||
# Only regenerate osquery packages if Fleet is enabled
|
||||
FLEET_MANAGER=$(lookup_pillar fleet_manager)
|
||||
FLEET_NODE=$(lookup_pillar fleet_node)
|
||||
if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
|
||||
echo ""
|
||||
echo "Regenerating Osquery Packages.... This will take several minutes."
|
||||
salt-call state.apply fleet.event_gen-packages -l info queue=True
|
||||
echo ""
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
|
||||
salt-call state.highstate -l info queue=True
|
||||
echo ""
|
||||
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
|
||||
|
||||
echo ""
|
||||
echo "Stopping Salt Master to remove ACL"
|
||||
systemctl stop salt-master
|
||||
|
||||
masterunlock
|
||||
|
||||
echo ""
|
||||
echo "Starting Salt Master service."
|
||||
systemctl start salt-master
|
||||
echo "Running a highstate. This could take several minutes."
|
||||
salt-call state.highstate -l info queue=True
|
||||
postupgrade_changes
|
||||
unmount_update
|
||||
thehive_maint
|
||||
|
||||
if [ "$UPGRADESALT" == "1" ]; then
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
echo ""
|
||||
echo "Cleaning repos on remote Security Onion nodes."
|
||||
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
|
||||
echo ""
|
||||
check_airgap
|
||||
if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
|
||||
echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
|
||||
exit 0
|
||||
fi
|
||||
fi
|
||||
echo "Checking to see if this is a manager."
|
||||
echo ""
|
||||
require_manager
|
||||
set_minionid
|
||||
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
|
||||
echo ""
|
||||
if [[ $is_airgap -eq 0 ]]; then
|
||||
# Let's mount the ISO since this is airgap
|
||||
airgap_mounted
|
||||
else
|
||||
echo "Cloning Security Onion github repo into $UPDATE_DIR."
|
||||
echo "Removing previous upgrade sources."
|
||||
rm -rf $UPDATE_DIR
|
||||
echo "Cloning the Security Onion Repo."
|
||||
clone_to_tmp
|
||||
fi
|
||||
echo "Verifying we have the latest soup script."
|
||||
verify_latest_update_script
|
||||
echo ""
|
||||
set_os
|
||||
set_palette
|
||||
check_elastic_license
|
||||
echo ""
|
||||
check_os_updates
|
||||
|
||||
check_sudoers
|
||||
echo "Generating new repo archive"
|
||||
generate_and_clean_tarballs
|
||||
if [ -f /usr/sbin/so-image-common ]; then
|
||||
. /usr/sbin/so-image-common
|
||||
else
|
||||
add_common
|
||||
fi
|
||||
|
||||
if [[ -n $lsl_msg ]]; then
|
||||
case $lsl_msg in
|
||||
'distributed')
|
||||
echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
|
||||
echo " -> We recommend checking and adjusting the values as necessary."
|
||||
echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
|
||||
;;
|
||||
'single-node')
|
||||
# We can assume the lsl_details array has been set if lsl_msg has this value
|
||||
echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
|
||||
echo " -> We recommend checking and adjusting the value as necessary."
|
||||
echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
echo "Let's see if we need to update Security Onion."
|
||||
upgrade_check
|
||||
upgrade_space
|
||||
|
||||
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
|
||||
echo "Checking for Salt Master and Minion updates."
|
||||
upgrade_check_salt
|
||||
set -e
|
||||
|
||||
if [ $NUM_MINIONS -gt 1 ]; then
|
||||
if [ "$is_hotfix" == "true" ]; then
|
||||
echo "Applying $HOTFIXVERSION"
|
||||
copy_new_files
|
||||
echo ""
|
||||
update_version
|
||||
salt-call state.highstate -l info queue=True
|
||||
else
|
||||
echo ""
|
||||
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
|
||||
echo ""
|
||||
|
||||
cat << EOF
|
||||
|
||||
echo "Updating dockers to $NEWVERSION."
|
||||
if [[ $is_airgap -eq 0 ]]; then
|
||||
airgap_update_dockers
|
||||
update_centos_repo
|
||||
yum clean all
|
||||
check_os_updates
|
||||
else
|
||||
update_registry
|
||||
set +e
|
||||
update_docker_containers "soup"
|
||||
set -e
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Stopping Salt Minion service."
|
||||
systemctl stop salt-minion
|
||||
echo "Killing any remaining Salt Minion processes."
|
||||
set +e
|
||||
pkill -9 -ef /usr/bin/salt-minion
|
||||
set -e
|
||||
echo ""
|
||||
echo "Stopping Salt Master service."
|
||||
systemctl stop salt-master
|
||||
echo ""
|
||||
|
||||
upgrade_to_2.3.50_repo
|
||||
|
||||
# Does Salt need to be upgraded? If so, update it.
|
||||
if [[ $UPGRADESALT -eq 1 ]]; then
|
||||
echo "Upgrading Salt"
|
||||
# Update the repo files so it can actually upgrade
|
||||
upgrade_salt
|
||||
fi
|
||||
|
||||
echo "Checking if Salt was upgraded."
|
||||
echo ""
|
||||
# Check that Salt was upgraded
|
||||
SALTVERSIONPOSTUPGRADE=$(salt --versions-report | grep Salt: | awk '{print $2}')
|
||||
if [[ "$SALTVERSIONPOSTUPGRADE" != "$NEWSALTVERSION" ]]; then
|
||||
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
|
||||
echo "Once the issue is resolved, run soup again."
|
||||
echo "Exiting."
|
||||
echo ""
|
||||
exit 0
|
||||
else
|
||||
echo "Salt upgrade success."
|
||||
echo ""
|
||||
fi
|
||||
|
||||
preupgrade_changes
|
||||
echo ""
|
||||
|
||||
if [[ $is_airgap -eq 0 ]]; then
|
||||
echo "Updating Rule Files to the Latest."
|
||||
update_airgap_rules
|
||||
fi
|
||||
|
||||
# Only update the repo if this is an airgap install
|
||||
if [[ $is_airgap -eq 0 && $UPGRADESALT -ne 1 ]]; then
|
||||
update_centos_repo
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR."
|
||||
copy_new_files
|
||||
echo ""
|
||||
update_version
|
||||
|
||||
echo ""
|
||||
echo "Locking down Salt Master for upgrade"
|
||||
masterlock
|
||||
|
||||
echo ""
|
||||
echo "Starting Salt Master service."
|
||||
systemctl start salt-master
|
||||
|
||||
# Testing that salt-master is up by checking that it is connected to itself
|
||||
set +e
|
||||
echo "Waiting on the Salt Master service to be ready."
|
||||
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
|
||||
set -e
|
||||
|
||||
echo ""
|
||||
echo "Ensuring python modules for Salt are installed and patched."
|
||||
salt-call state.apply salt.python3-influxdb -l info queue=True
|
||||
echo ""
|
||||
|
||||
# Only regenerate osquery packages if Fleet is enabled
|
||||
FLEET_MANAGER=$(lookup_pillar fleet_manager)
|
||||
FLEET_NODE=$(lookup_pillar fleet_node)
|
||||
if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
|
||||
echo ""
|
||||
echo "Regenerating Osquery Packages.... This will take several minutes."
|
||||
salt-call state.apply fleet.event_gen-packages -l info queue=True
|
||||
echo ""
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
|
||||
set +e
|
||||
salt-call state.highstate -l info queue=True
|
||||
set -e
|
||||
|
||||
echo ""
|
||||
echo "Stopping Salt Master to remove ACL"
|
||||
systemctl stop salt-master
|
||||
|
||||
masterunlock
|
||||
|
||||
echo ""
|
||||
echo "Starting Salt Master service."
|
||||
systemctl start salt-master
|
||||
|
||||
set +e
|
||||
echo "Waiting on the Salt Master service to be ready."
|
||||
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
|
||||
set -e
|
||||
|
||||
echo "Running a highstate. This could take several minutes."
|
||||
salt-call state.highstate -l info queue=True
|
||||
postupgrade_changes
|
||||
[[ $is_airgap -eq 0 ]] && unmount_update
|
||||
thehive_maint
|
||||
|
||||
echo ""
|
||||
echo "Upgrade to $NEWVERSION complete."
|
||||
|
||||
# Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
|
||||
set +e
|
||||
|
||||
echo "Checking the number of minions."
|
||||
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
|
||||
if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
|
||||
if [[ $is_airgap -eq 0 ]]; then
|
||||
echo ""
|
||||
echo "Cleaning repos on remote Security Onion nodes."
|
||||
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone and G@os:CentOS' cmd.run "yum clean all"
|
||||
echo ""
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "Checking for local modifications."
|
||||
check_local_mods
|
||||
|
||||
echo "Checking sudoers file."
|
||||
check_sudoers
|
||||
|
||||
if [[ -n $lsl_msg ]]; then
|
||||
case $lsl_msg in
|
||||
'distributed')
|
||||
echo "[INFO] The value of log_size_limit in any heavy node minion pillars may be incorrect."
|
||||
echo " -> We recommend checking and adjusting the values as necessary."
|
||||
echo " -> Minion pillar directory: /opt/so/saltstack/local/pillar/minions/"
|
||||
;;
|
||||
'single-node')
|
||||
# We can assume the lsl_details array has been set if lsl_msg has this value
|
||||
echo "[WARNING] The value of log_size_limit (${lsl_details[0]}) does not match the recommended value of ${lsl_details[1]}."
|
||||
echo " -> We recommend checking and adjusting the value as necessary."
|
||||
echo " -> File: /opt/so/saltstack/local/pillar/minions/${lsl_details[2]}.sls"
|
||||
;;
|
||||
esac
|
||||
fi
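# Not part of soup itself: a quick, read-only way to review the log_size_limit
# values that the notice above refers to (assumes the standard pillar layout):
#
#   grep -r 'log_size_limit' /opt/so/saltstack/local/pillar/minions/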
|
||||
|
||||
if [[ $NUM_MINIONS -gt 1 ]]; then
|
||||
|
||||
cat << EOF
|
||||
|
||||
|
||||
|
||||
This appears to be a distributed deployment. Other nodes should update themselves at the next Salt highstate (typically within 15 minutes). Do not manually restart anything until you know that all the search/heavy nodes in your deployment are updated. This is especially important if you are using true clustering for Elasticsearch.
|
||||
|
||||
Each minion is on a random 15 minute check-in period and things like network bandwidth can be a factor in how long the actual upgrade takes. If you have a heavy node on a slow link, it is going to take a while to get the containers to it. Depending on what changes happened between the versions, Elasticsearch might not be able to talk to said heavy node until the update is complete.
|
||||
@@ -855,13 +1066,62 @@ Each minion is on a random 15 minute check-in period and things like network ban
|
||||
If it looks like you’re missing data after the upgrade, please avoid restarting services and instead make sure at least one search node has completed its upgrade. The best way to do this is to run 'sudo salt-call state.highstate' from a search node and make sure there are no errors. Typically if it works on one node it will work on the rest. Forward nodes are less complex and will update as they check in so you can monitor those from the Grid section of SOC.
|
||||
|
||||
For more information, please see https://docs.securityonion.net/en/2.3/soup.html#distributed-deployments.
|
||||
|
||||
EOF
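# Not executed by soup: one way to confirm that remote nodes have picked up the
# new version after their next check-in is to compare /etc/soversion across the
# grid from the manager (illustrative command, output will vary):
#
#   salt '*' cmd.run 'cat /etc/soversion'
#
# Any minion still reporting the old version has not finished its highstate yet.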
|
||||
|
||||
fi
|
||||
echo "### soup has been served at `date` ###"
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then
|
||||
|
||||
cat << EOF
|
||||
|
||||
|
||||
A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.
|
||||
|
||||
If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at https://docs.securityonion.net/en/2.3/elasticsearch.html.
|
||||
|
||||
EOF
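# Hedged sketch only: settings from the old local elasticsearch.yml are now
# expected to live under the elasticsearch pillar, for example in
# /opt/so/saltstack/local/pillar/global.sls (the value shown is a placeholder;
# see the Elasticsearch docs page referenced above for the supported keys):
#
#   elasticsearch:
#     config:
#       cluster:
#         name: my-cluster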
|
||||
|
||||
fi
|
||||
|
||||
echo "### soup has been served at $(date) ###"
|
||||
}
|
||||
|
||||
cat << EOF
|
||||
while getopts ":b:f:y" opt; do
|
||||
case ${opt} in
|
||||
b )
|
||||
BATCHSIZE="$OPTARG"
|
||||
if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
|
||||
echo "Batch size must be a number greater than 0."
|
||||
exit 1
|
||||
fi
|
||||
;;
|
||||
y )
|
||||
if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
|
||||
echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
|
||||
exit 1
|
||||
else
|
||||
UNATTENDED=true
|
||||
fi
|
||||
;;
|
||||
f )
|
||||
ISOLOC="$OPTARG"
|
||||
;;
|
||||
\? )
|
||||
echo "Usage: soup [-b] [-y] [-f <iso location>]"
|
||||
exit 1
|
||||
;;
|
||||
: )
|
||||
echo "Invalid option: $OPTARG requires an argument"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
shift $((OPTIND - 1))
|
||||
|
||||
if [[ -z $UNATTENDED ]]; then
|
||||
cat << EOF
|
||||
|
||||
SOUP - Security Onion UPdater
|
||||
|
||||
@@ -873,6 +1133,9 @@ Press Enter to continue or Ctrl-C to cancel.
|
||||
|
||||
EOF
|
||||
|
||||
read input
|
||||
read -r input
|
||||
fi
|
||||
|
||||
echo "### Preparing soup at $(date) ###"
|
||||
main "$@" | tee -a $SOUP_LOG
|
||||
|
||||
|
||||
29
salt/curator/files/action/so-aws-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-aws:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close aws indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-aws.*|so-aws.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
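# Not part of the curator action itself: the close/delete/warm thresholds in
# these new files are read from the elasticsearch:index_settings:<index> pillar
# keys, with the defaults shown in each file's Jinja header (close: 30,
# delete: 365, warm: 7 days). A hedged way to inspect the value a template will
# render is to query the same pillar key on the manager, e.g.:
#
#   salt-call pillar.get elasticsearch:index_settings:so-aws:close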
|
||||
29
salt/curator/files/action/so-aws-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete aws indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-aws.*|so-aws.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-aws-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-aws
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-azure-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-azure:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close azure indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-azure.*|so-azure.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
29
salt/curator/files/action/so-azure-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete azure indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-azure.*|so-azure.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-azure-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-azure
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-barracuda-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close barracuda indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-barracuda.*|so-barracuda.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
29
salt/curator/files/action/so-barracuda-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete barracuda indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-barracuda.*|so-barracuda.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-barracuda-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-barracuda
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-beats-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete beats indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-beats.*|so-beats.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-beats-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-beats
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-bluecoat-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close bluecoat indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
29
salt/curator/files/action/so-bluecoat-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete bluecoat indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-bluecoat-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-bluecoat
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-cef-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cef:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close cef indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-cef.*|so-cef.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
29
salt/curator/files/action/so-cef-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete cef indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-cef.*|so-cef.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-cef-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-cef
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-checkpoint-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close checkpoint indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
29
salt/curator/files/action/so-checkpoint-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete checkpoint indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-checkpoint-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-checkpoint
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-cisco-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cisco:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close cisco indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-cisco.*|so-cisco.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
29
salt/curator/files/action/so-cisco-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete cisco indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-cisco.*|so-cisco.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-cisco-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-cisco
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-cyberark-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close cyberark indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-cyberark.*|so-cyberark.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
29
salt/curator/files/action/so-cyberark-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete cyberark indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-cyberark.*|so-cyberark.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-cyberark-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-cyberark
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-cylance-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cylance:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close cylance indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-cylance.*|so-cylance.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
29
salt/curator/files/action/so-cylance-delete.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:delete', 365) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: delete_indices
|
||||
description: >-
|
||||
Delete cylance indices when older than {{ DELETE_DAYS }} days.
|
||||
options:
|
||||
ignore_empty_list: True
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-cylance.*|so-cylance.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ DELETE_DAYS }}
|
||||
exclude:
|
||||
|
||||
|
||||
24
salt/curator/files/action/so-cylance-warm.yml
Normal file
@@ -0,0 +1,24 @@
|
||||
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:warm', 7) -%}
|
||||
actions:
|
||||
1:
|
||||
action: allocation
|
||||
description: "Apply shard allocation filtering rules to the specified indices"
|
||||
options:
|
||||
key: box_type
|
||||
value: warm
|
||||
allocation_type: require
|
||||
wait_for_completion: true
|
||||
timeout_override:
|
||||
continue_if_exception: false
|
||||
disable_action: false
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: prefix
|
||||
value: so-cylance
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{ WARM_DAYS }}
|
||||
|
||||
29
salt/curator/files/action/so-elasticsearch-close.yml
Normal file
@@ -0,0 +1,29 @@
|
||||
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:close', 30) -%}
|
||||
---
|
||||
# Remember, leave a key empty if there is no value. None will be a string,
|
||||
# not a Python "NoneType"
|
||||
#
|
||||
# Also remember that all examples have 'disable_action' set to True. If you
|
||||
# want to use this action as a template, be sure to set this to False after
|
||||
# copying it.
|
||||
actions:
|
||||
1:
|
||||
action: close
|
||||
description: >-
|
||||
Close elasticsearch indices older than {{cur_close_days}} days.
|
||||
options:
|
||||
delete_aliases: False
|
||||
timeout_override:
|
||||
continue_if_exception: False
|
||||
disable_action: False
|
||||
filters:
|
||||
- filtertype: pattern
|
||||
kind: regex
|
||||
value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$'
|
||||
- filtertype: age
|
||||
source: name
|
||||
direction: older
|
||||
timestring: '%Y.%m.%d'
|
||||
unit: days
|
||||
unit_count: {{cur_close_days}}
|
||||
exclude:
|
||||
Some files were not shown because too many files have changed in this diff.