Mirror of https://github.com/Security-Onion-Solutions/securityonion.git
Synced 2025-12-06 09:12:45 +01:00

Compare commits: 2.3.60-FBPIPELINE ... 2.3.90-WAZUH (1104 commits)
@@ -15,7 +15,7 @@
 ### Contributing code
 
-* **All commits must be signed** with a valid key that has been added to your GitHub account. The commits should have all the "**Verified**" tag when viewed on GitHub as shown below:
+* **All commits must be signed** with a valid key that has been added to your GitHub account. Each commit should have the "**Verified**" tag when viewed on GitHub as shown below:
 
 <img src="./assets/images/verified-commit-1.png" width="450">
@@ -1,6 +1,6 @@
-## Security Onion 2.3.60
+## Security Onion 2.3.90-WAZUH
 
-Security Onion 2.3.60 is here!
+Security Onion 2.3.90-WAZUH is here!
 
 ## Screenshots
@@ -1,18 +1,18 @@
-### 2.3.60-FBPIPELINE ISO image built on 2021/07/13
+### 2.3.90-WAZUH ISO image built on 2021/11/23
 
 
 ### Download and Verify
 
-2.3.60-FBPIPELINE ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.3.60-FBPIPELINE.iso
+2.3.90-WAZUH ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-2.3.90-WAZUH.iso
 
-MD5: 2EA2B337289D0CFF0C7488E8E88FE7BE
-SHA1: 7C22F16AD395E079F4C5345093AF26C105E36D4C
-SHA256: 3B685BBD19711229C5FCD5D254BA5024AF0C36A3E379790B5E83037CE2668724
+MD5: B7141C8627CDB45F4A8741B2ADE4A9F3
+SHA1: 16087B385CA651659EC98F139AFDF90922430FB6
+SHA256: 667AF11BBCFE3248AF59E45043703B55A543E059899AE387FF55EB8077304F04
 
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.60-FBPIPELINE.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.90-WAZUH.iso.sig
 
 Signing key:
 https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma
 
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.60-FBPIPELINE.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.90-WAZUH.iso.sig
 ```
 
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.3.60-FBPIPELINE.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-2.3.90-WAZUH.iso
 ```
 
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.3.60-FBPIPELINE.iso.sig securityonion-2.3.60-FBPIPELINE.iso
+gpg --verify securityonion-2.3.90-WAZUH.iso.sig securityonion-2.3.90-WAZUH.iso
 ```
 
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Tue 13 Jul 2021 04:12:08 PM EDT using RSA key ID FE507013
+gpg: Signature made Tue 23 Nov 2021 03:19:08 PM EST using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 ```
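For anyone who wants to script the download-and-verify steps above instead of running the wget and gpg commands by hand, here is a minimal Python sketch. It assumes gpg is on the PATH and that the signing key from the KEYS URL has already been imported; the file names follow the 2.3.90-WAZUH release shown in this diff.

```
import subprocess
import urllib.request

ISO = 'securityonion-2.3.90-WAZUH.iso'
SIG = ISO + '.sig'
ISO_URL = 'https://download.securityonion.net/file/securityonion/' + ISO
SIG_URL = ('https://github.com/Security-Onion-Solutions/securityonion/'
           'raw/master/sigs/' + SIG)

# Fetch the ISO and its detached signature (the ISO is large; this is
# just the scripted equivalent of the wget commands above).
for url, dest in ((ISO_URL, ISO), (SIG_URL, SIG)):
    urllib.request.urlretrieve(url, dest)

# gpg exits non-zero when the signature does not verify.
result = subprocess.run(['gpg', '--verify', SIG, ISO])
if result.returncode != 0:
    raise SystemExit('Signature verification FAILED')
print('Good signature')
```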
@@ -16,6 +16,10 @@ firewall:
     ips:
       delete:
       insert:
+  endgame:
+    ips:
+      delete:
+      insert:
   fleet:
     ips:
       delete:
@@ -1,7 +1,7 @@
 elasticsearch:
   templates:
     - so/so-beats-template.json.jinja
-    - so/so-common-template.json
+    - so/so-common-template.json.jinja
     - so/so-firewall-template.json.jinja
     - so/so-flow-template.json.jinja
     - so/so-ids-template.json.jinja
@@ -1,7 +1,8 @@
 elasticsearch:
   templates:
     - so/so-beats-template.json.jinja
-    - so/so-common-template.json
+    - so/so-common-template.json.jinja
+    - so/so-endgame-template.json.jinja
     - so/so-firewall-template.json.jinja
     - so/so-flow-template.json.jinja
     - so/so-ids-template.json.jinja
@@ -1,7 +1,8 @@
 elasticsearch:
   templates:
     - so/so-beats-template.json.jinja
-    - so/so-common-template.json
+    - so/so-common-template.json.jinja
+    - so/so-endgame-template.json.jinja
     - so/so-firewall-template.json.jinja
     - so/so-flow-template.json.jinja
     - so/so-ids-template.json.jinja
@@ -1,6 +1,7 @@
 logstash:
   docker_options:
     port_bindings:
+      - 0.0.0.0:3765:3765
       - 0.0.0.0:5044:5044
       - 0.0.0.0:5644:5644
       - 0.0.0.0:6050:6050
@@ -5,5 +5,6 @@ logstash:
   config:
     - so/0009_input_beats.conf
     - so/0010_input_hhbeats.conf
+    - so/0011_input_endgame.conf
     - so/9999_output_redis.conf.jinja
@@ -13,3 +13,5 @@ logstash:
     - so/9500_output_beats.conf.jinja
     - so/9600_output_ossec.conf.jinja
     - so/9700_output_strelka.conf.jinja
+    - so/9800_output_logscan.conf.jinja
+    - so/9900_output_endgame.conf.jinja
@@ -24,6 +24,9 @@ base:
     - data.*
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
+{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
+    - kibana.secrets
+{% endif %}
     - secrets
     - global
@@ -43,6 +46,9 @@ base:
     - elasticsearch.eval
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
    - elasticsearch.auth
 {% endif %}
+{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
+    - kibana.secrets
+{% endif %}
     - global
     - minions.{{ grains.id }}
@@ -54,6 +60,9 @@ base:
     - elasticsearch.search
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
+{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
+    - kibana.secrets
+{% endif %}
     - data.*
     - zeeklogs
@@ -101,6 +110,9 @@ base:
     - elasticsearch.eval
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
+{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
+    - kibana.secrets
+{% endif %}
     - global
     - minions.{{ grains.id }}
@@ -35,6 +35,7 @@
       'influxdb',
       'grafana',
       'soc',
+      'kratos',
       'firewall',
       'idstools',
       'suricata.manager',
@@ -45,7 +46,8 @@
       'schedule',
       'soctopus',
       'tcpreplay',
-      'docker_clean'
+      'docker_clean',
+      'learn'
     ],
     'so-heavynode': [
       'ca',
@@ -99,6 +101,7 @@
       'manager',
       'nginx',
       'soc',
+      'kratos',
       'firewall',
       'idstools',
       'suricata.manager',
@@ -108,7 +111,8 @@
       'zeek',
       'schedule',
       'tcpreplay',
-      'docker_clean'
+      'docker_clean',
+      'learn'
     ],
     'so-manager': [
       'salt.master',
@@ -121,13 +125,15 @@
       'influxdb',
       'grafana',
       'soc',
+      'kratos',
       'firewall',
       'idstools',
       'suricata.manager',
       'utility',
       'schedule',
       'soctopus',
-      'docker_clean'
+      'docker_clean',
+      'learn'
     ],
     'so-managersearch': [
       'salt.master',
@@ -139,6 +145,7 @@
       'influxdb',
       'grafana',
       'soc',
+      'kratos',
       'firewall',
       'manager',
       'idstools',
@@ -146,7 +153,8 @@
       'utility',
       'schedule',
       'soctopus',
-      'docker_clean'
+      'docker_clean',
+      'learn'
     ],
     'so-node': [
       'ca',
@@ -168,6 +176,7 @@
       'influxdb',
       'grafana',
       'soc',
+      'kratos',
       'firewall',
       'idstools',
       'suricata.manager',
@@ -178,7 +187,8 @@
       'schedule',
       'soctopus',
       'tcpreplay',
-      'docker_clean'
+      'docker_clean',
+      'learn'
     ],
     'so-sensor': [
       'ca',
@@ -233,11 +243,16 @@
 {% do allowed_states.append('elasticsearch') %}
 {% endif %}
 
-{% if KIBANA and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
-{% do allowed_states.append('kibana') %}
+{% if ELASTICSEARCH and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
+{% do allowed_states.append('elasticsearch.auth') %}
 {% endif %}
 
-{% if CURATOR and grains.role in ['so-eval', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode'] %}
+{% if KIBANA and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch', 'so-import'] %}
+{% do allowed_states.append('kibana') %}
+{% do allowed_states.append('kibana.secrets') %}
+{% endif %}
+
+{% if grains.role in ['so-eval', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
 {% do allowed_states.append('curator') %}
 {% endif %}
 
|
||||
@@ -24,8 +24,9 @@ pki_private_key:
|
||||
- x509: /etc/pki/ca.crt
|
||||
{%- endif %}
|
||||
|
||||
/etc/pki/ca.crt:
|
||||
pki_public_ca_crt:
|
||||
x509.certificate_managed:
|
||||
- name: /etc/pki/ca.crt
|
||||
- signing_private_key: /etc/pki/ca.key
|
||||
- CN: {{ manager }}
|
||||
- C: US
|
||||
|
||||
@@ -22,6 +22,7 @@
 /opt/so/log/salt/so-salt-minion-check
 /opt/so/log/salt/minion
 /opt/so/log/salt/master
+/opt/so/log/logscan/*.log
 {
 {{ logrotate_conf | indent(width=4) }}
 }
@@ -9,6 +9,11 @@ rmvariablesfile:
   file.absent:
     - name: /tmp/variables.txt
 
+dockergroup:
+  group.present:
+    - name: docker
+    - gid: 920
+
 # Add socore Group
 socoregroup:
   group.present:
@@ -101,16 +106,24 @@ commonpkgs:
       - python3-m2crypto
       - python3-mysqldb
       - python3-packaging
+      - python3-lxml
       - git
       - vim
 
 heldpackages:
   pkg.installed:
     - pkgs:
+{% if grains['oscodename'] == 'bionic' %}
       - containerd.io: 1.4.4-1
       - docker-ce: 5:20.10.5~3-0~ubuntu-bionic
       - docker-ce-cli: 5:20.10.5~3-0~ubuntu-bionic
       - docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-bionic
+{% elif grains['oscodename'] == 'focal' %}
+      - containerd.io: 1.4.9-1
+      - docker-ce: 5:20.10.8~3-0~ubuntu-focal
+      - docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal
+      - docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal
+{% endif %}
       - hold: True
       - update_holds: True
@@ -136,6 +149,7 @@ commonpkgs:
       - python36-m2crypto
       - python36-mysql
       - python36-packaging
+      - python36-lxml
       - yum-utils
       - device-mapper-persistent-data
       - lvm2
@@ -326,6 +340,16 @@ dockerreserveports:
     - name: /etc/sysctl.d/99-reserved-ports.conf
 
+{% if salt['grains.get']('sosmodel', '') %}
+{% if grains['os'] == 'CentOS' %}
+# Install Raid tools
+raidpkgs:
+  pkg.installed:
+    - skip_suggestions: True
+    - pkgs:
+      - securityonion-raidtools
+      - securityonion-megactl
+{% endif %}
 
 # Install raid check cron
 /usr/sbin/so-raid-status > /dev/null 2>&1:
   cron.present:
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env python3
 
 # Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
 #
@@ -15,152 +15,193 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-. /usr/sbin/so-common
-
-local_salt_dir=/opt/so/saltstack/local
-
-SKIP=0
-
-function usage {
-
-cat << EOF
-
-Usage: $0 [-abefhoprsw] [ -i IP ]
-
-This program allows you to add a firewall rule to allow connections from a new IP address or CIDR range.
-
-If you run this program with no arguments, it will present a menu for you to choose your options.
-
-If you want to automate and skip the menu, you can pass the desired options as command line arguments.
-
-EXAMPLES
-
-To add 10.1.2.3 to the analyst role:
-so-allow -a -i 10.1.2.3
-
-To add 10.1.2.0/24 to the osquery role:
-so-allow -o -i 10.1.2.0/24
-
-EOF
-}
-
-while getopts "ahfesprbowi:" OPTION
-do
-case $OPTION in
-h)
-usage
-exit 0
-;;
-a)
-FULLROLE="analyst"
-SKIP=1
-;;
-b)
-FULLROLE="beats_endpoint"
-SKIP=1
-;;
-e)
-FULLROLE="elasticsearch_rest"
-SKIP=1
-;;
-f)
-FULLROLE="strelka_frontend"
-SKIP=1
-;;
-i) IP=$OPTARG
-;;
-o)
-FULLROLE="osquery_endpoint"
-SKIP=1
-;;
-w)
-FULLROLE="wazuh_agent"
-SKIP=1
-;;
-s)
-FULLROLE="syslog"
-SKIP=1
-;;
-p)
-FULLROLE="wazuh_api"
-SKIP=1
-;;
-r)
-FULLROLE="wazuh_authd"
-SKIP=1
-;;
-*)
-usage
-exit 0
-;;
-esac
-done
-
-if [ "$SKIP" -eq 0 ]; then
-
-echo "This program allows you to add a firewall rule to allow connections from a new IP address."
-echo ""
-echo "Choose the role for the IP or Range you would like to add"
-echo ""
-echo "[a] - Analyst - ports 80/tcp and 443/tcp"
-echo "[b] - Logstash Beat - port 5044/tcp"
-echo "[e] - Elasticsearch REST API - port 9200/tcp"
-echo "[f] - Strelka frontend - port 57314/tcp"
-echo "[o] - Osquery endpoint - port 8090/tcp"
-echo "[s] - Syslog device - 514/tcp/udp"
-echo "[w] - Wazuh agent - port 1514/tcp/udp"
-echo "[p] - Wazuh API - port 55000/tcp"
-echo "[r] - Wazuh registration service - 1515/tcp"
-echo ""
-echo "Please enter your selection:"
-read -r ROLE
-echo "Enter a single ip address or range to allow (example: 10.10.10.10 or 10.10.0.0/16):"
-read -r IP
-
-if [ "$ROLE" == "a" ]; then
-FULLROLE=analyst
-elif [ "$ROLE" == "b" ]; then
-FULLROLE=beats_endpoint
-elif [ "$ROLE" == "e" ]; then
-FULLROLE=elasticsearch_rest
-elif [ "$ROLE" == "f" ]; then
-FULLROLE=strelka_frontend
-elif [ "$ROLE" == "o" ]; then
-FULLROLE=osquery_endpoint
-elif [ "$ROLE" == "w" ]; then
-FULLROLE=wazuh_agent
-elif [ "$ROLE" == "s" ]; then
-FULLROLE=syslog
-elif [ "$ROLE" == "p" ]; then
-FULLROLE=wazuh_api
-elif [ "$ROLE" == "r" ]; then
-FULLROLE=wazuh_authd
-else
-echo "I don't recognize that role"
-exit 1
-fi
-
-fi
-
-echo "Adding $IP to the $FULLROLE role. This can take a few seconds"
-/usr/sbin/so-firewall includehost $FULLROLE $IP
-salt-call state.apply firewall queue=True
-
-# Check if Wazuh enabled
-if grep -q -R "wazuh: 1" $local_salt_dir/pillar/*; then
-  # If analyst, add to Wazuh AR whitelist
-  if [ "$FULLROLE" == "analyst" ]; then
-    WAZUH_MGR_CFG="/nsm/wazuh/etc/ossec.conf"
-    if ! grep -q "<white_list>$IP</white_list>" $WAZUH_MGR_CFG ; then
-      DATE=$(date)
-      sed -i 's/<\/ossec_config>//' $WAZUH_MGR_CFG
-      sed -i '/^$/N;/^\n$/D' $WAZUH_MGR_CFG
-      echo -e "<!--Address $IP added by /usr/sbin/so-allow on \"$DATE\"-->\n  <global>\n    <white_list>$IP</white_list>\n  </global>\n</ossec_config>" >> $WAZUH_MGR_CFG
-      echo "Added whitelist entry for $IP in $WAZUH_MGR_CFG."
-      echo
-      echo "Restarting OSSEC Server..."
-      /usr/sbin/so-wazuh-restart
-    fi
-  fi
-fi
+import ipaddress
+import textwrap
+import os
+import subprocess
+import sys
+import argparse
+import re
+from lxml import etree as ET
+from datetime import datetime as dt
+from datetime import timezone as tz
+
+LOCAL_SALT_DIR='/opt/so/saltstack/local'
+WAZUH_CONF='/nsm/wazuh/etc/ossec.conf'
+VALID_ROLES = {
+    'a': { 'role': 'analyst','desc': 'Analyst - 80/tcp, 443/tcp' },
+    'b': { 'role': 'beats_endpoint', 'desc': 'Logstash Beat - 5044/tcp' },
+    'e': { 'role': 'elasticsearch_rest', 'desc': 'Elasticsearch REST API - 9200/tcp' },
+    'f': { 'role': 'strelka_frontend', 'desc': 'Strelka frontend - 57314/tcp' },
+    'o': { 'role': 'osquery_endpoint', 'desc': 'Osquery endpoint - 8090/tcp' },
+    's': { 'role': 'syslog', 'desc': 'Syslog device - 514/tcp/udp' },
+    'w': { 'role': 'wazuh_agent', 'desc': 'Wazuh agent - 1514/tcp/udp' },
+    'p': { 'role': 'wazuh_api', 'desc': 'Wazuh API - 55000/tcp' },
+    'r': { 'role': 'wazuh_authd', 'desc': 'Wazuh registration service - 1515/tcp' }
+}
+
+
+def validate_ip_cidr(ip_cidr: str) -> bool:
+    try:
+        ipaddress.ip_address(ip_cidr)
+    except ValueError:
+        try:
+            ipaddress.ip_network(ip_cidr)
+        except ValueError:
+            return False
+    return True
+
+
+def role_prompt() -> str:
+    print()
+    print('Choose the role for the IP or Range you would like to allow')
+    print()
+    for role in VALID_ROLES:
+        print(f'[{role}] - {VALID_ROLES[role]["desc"]}')
+    print()
+    role = input('Please enter your selection: ')
+    if role in VALID_ROLES.keys():
+        return VALID_ROLES[role]['role']
+    else:
+        print(f'Invalid role \'{role}\', please try again.', file=sys.stderr)
+        sys.exit(1)
+
+
+def ip_prompt() -> str:
+    ip = input('Enter a single ip address or range to allow (ex: 10.10.10.10 or 10.10.0.0/16): ')
+    if validate_ip_cidr(ip):
+        return ip
+    else:
+        print(f'Invalid IP address or CIDR block \'{ip}\', please try again.', file=sys.stderr)
+        sys.exit(1)
+
+
+def wazuh_enabled() -> bool:
+    file = f'{LOCAL_SALT_DIR}/pillar/global.sls'
+    with open(file, 'r') as pillar:
+        if 'wazuh: 1' in pillar.read():
+            return True
+    return False
+
+
+def root_to_str(root: ET.ElementTree) -> str:
+    return ET.tostring(root, encoding='unicode', method='xml', xml_declaration=False, pretty_print=True)
+
+
+def add_wl(ip):
+    parser = ET.XMLParser(remove_blank_text=True)
+    with open(WAZUH_CONF, 'rb') as wazuh_conf:
+        tree = ET.parse(wazuh_conf, parser)
+    root = tree.getroot()
+
+    source_comment = ET.Comment(f'Address {ip} added by /usr/sbin/so-allow on {dt.utcnow().replace(tzinfo=tz.utc).strftime("%a %b %e %H:%M:%S %Z %Y")}')
+    new_global = ET.Element("global")
+    new_wl = ET.SubElement(new_global, 'white_list')
+    new_wl.text = ip
+
+    root.append(source_comment)
+    root.append(new_global)
+
+    with open(WAZUH_CONF, 'w') as add_out:
+        add_out.write(root_to_str(root))
+
+
+def apply(role: str, ip: str) -> int:
+    firewall_cmd = ['so-firewall', 'includehost', role, ip]
+    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'firewall', 'queue=True']
+    restart_wazuh_cmd = ['so-wazuh-restart']
+    print(f'Adding {ip} to the {role} role. This can take a few seconds...')
+    cmd = subprocess.run(firewall_cmd)
+    if cmd.returncode == 0:
+        cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
+    else:
+        return cmd.returncode
+    if cmd.returncode == 0:
+        if wazuh_enabled() and role=='analyst':
+            try:
+                add_wl(ip)
+                print(f'Added whitelist entry for {ip} from {WAZUH_CONF}', file=sys.stderr)
+            except Exception as e:
+                print(f'Failed to add whitelist entry for {ip} from {WAZUH_CONF}', file=sys.stderr)
+                print(e)
+                return 1
+            print('Restarting OSSEC Server...')
+            cmd = subprocess.run(restart_wazuh_cmd)
+        else:
+            return cmd.returncode
+    else:
+        print(f'Commmand \'{" ".join(salt_cmd)}\' failed.', file=sys.stderr)
+        return cmd.returncode
+    if cmd.returncode != 0:
+        print('Failed to restart OSSEC server.')
+    return cmd.returncode
+
+
+def main():
+    if os.geteuid() != 0:
+        print('You must run this script as root', file=sys.stderr)
+        sys.exit(1)
+
+    main_parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog=textwrap.dedent(f'''\
+            additional information:
+              To use this script in interactive mode call it with no arguments
+            '''
+        ))
+
+    group = main_parser.add_argument_group(title='roles')
+    group.add_argument('-a', dest='roles', action='append_const', const=VALID_ROLES['a']['role'], help="Analyst - 80/tcp, 443/tcp")
+    group.add_argument('-b', dest='roles', action='append_const', const=VALID_ROLES['b']['role'], help="Logstash Beat - 5044/tcp")
+    group.add_argument('-e', dest='roles', action='append_const', const=VALID_ROLES['e']['role'], help="Elasticsearch REST API - 9200/tcp")
+    group.add_argument('-f', dest='roles', action='append_const', const=VALID_ROLES['f']['role'], help="Strelka frontend - 57314/tcp")
+    group.add_argument('-o', dest='roles', action='append_const', const=VALID_ROLES['o']['role'], help="Osquery endpoint - 8090/tcp")
+    group.add_argument('-s', dest='roles', action='append_const', const=VALID_ROLES['s']['role'], help="Syslog device - 514/tcp/udp")
+    group.add_argument('-w', dest='roles', action='append_const', const=VALID_ROLES['w']['role'], help="Wazuh agent - 1514/tcp/udp")
+    group.add_argument('-p', dest='roles', action='append_const', const=VALID_ROLES['p']['role'], help="Wazuh API - 55000/tcp")
+    group.add_argument('-r', dest='roles', action='append_const', const=VALID_ROLES['r']['role'], help="Wazuh registration service - 1515/tcp")
+
+    ip_g = main_parser.add_argument_group(title='allow')
+    ip_g.add_argument('-i', help="IP or CIDR block to disallow connections from, requires at least one role argument", metavar='', dest='ip')
+
+    args = main_parser.parse_args(sys.argv[1:])
+
+    if args.roles is None:
+        role = role_prompt()
+        ip = ip_prompt()
+        try:
+            return_code = apply(role, ip)
+        except Exception as e:
+            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
+            return_code = e.errno
+        sys.exit(return_code)
+    elif args.roles is not None and args.ip is None:
+        if os.environ.get('IP') is None:
+            main_parser.print_help()
+            sys.exit(1)
+        else:
+            args.ip = os.environ['IP']
+
+    if validate_ip_cidr(args.ip):
+        try:
+            for role in args.roles:
+                return_code = apply(role, args.ip)
+                if return_code > 0:
+                    break
+        except Exception as e:
+            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
+            return_code = e.errno
+    else:
+        print(f'Invalid IP address or CIDR block \'{args.ip}\', please try again.', file=sys.stderr)
+        return_code = 1
+
+    sys.exit(return_code)
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except KeyboardInterrupt:
+        sys.exit(1)
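One behavioral detail of the new validate_ip_cidr() helper above: ipaddress.ip_network() is strict by default, so a CIDR range with host bits set is rejected, while a bare address is accepted through ip_address(). A standalone sketch (not part of the commit) showing which forms pass:

```
import ipaddress

def validate_ip_cidr(ip_cidr: str) -> bool:
    # Same logic as the helper in so-allow: accept a bare address first,
    # then fall back to parsing a CIDR network.
    try:
        ipaddress.ip_address(ip_cidr)
    except ValueError:
        try:
            ipaddress.ip_network(ip_cidr)  # strict=True by default
        except ValueError:
            return False
    return True

assert validate_ip_cidr('10.10.10.10')       # single address
assert validate_ip_cidr('10.10.0.0/16')      # aligned network
assert not validate_ip_cidr('10.10.0.1/16')  # host bits set: rejected
assert not validate_ip_cidr('not-an-ip')
```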
@@ -17,4 +17,4 @@
 
 . /usr/sbin/so-common
 
-salt-call state.highstate -linfo
+salt-call state.highstate -l info
@@ -99,6 +99,15 @@ check_password() {
   return $?
 }
 
+check_password_and_exit() {
+  local password=$1
+  if ! check_password "$password"; then
+    echo "Password is invalid. Do not include single quotes, double quotes, dollar signs, and backslashes in the password."
+    exit 2
+  fi
+  return 0
+}
+
 check_elastic_license() {
 
   [ -n "$TESTING" ] && return
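The new check_password_and_exit() helper rejects passwords containing single quotes, double quotes, dollar signs, or backslashes, since those characters break the quoting in downstream shell and Salt commands. The same rule expressed in Python, as a small illustrative sketch:

```
import re

def check_password(password: str) -> bool:
    # Reject the characters the shell version calls out: single quotes,
    # double quotes, dollar signs, and backslashes.
    return re.search(r"['\"$\\]", password) is None

assert check_password('S3cure-passphrase')
assert not check_password('bad$password')
assert not check_password('back\\slash')
```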
@@ -372,18 +381,29 @@ set_version() {
   fi
 }
 
+has_uppercase() {
+  local string=$1
+
+  echo "$string" | grep -qP '[A-Z]' \
+    && return 0 \
+    || return 1
+}
+
 valid_cidr() {
-  # Verify there is a backslash in the string
-  echo "$1" | grep -qP "^[^/]+/[^/]+$" || return 1
+  valid_ip4_cidr_mask "$1" && return 0 || return 1
 
-  local cidr
-  local ip
-
-  cidr=$(echo "$1" | sed 's/.*\///')
-  ip=$(echo "$1" | sed 's/\/.*//' )
+  local cidr="$1"
+  local ip
+  ip=$(echo "$cidr" | sed 's/\/.*//' )
 
   if valid_ip4 "$ip"; then
-    [[ $cidr =~ ([0-9]|[1-2][0-9]|3[0-2]) ]] && return 0 || return 1
+    local ip1 ip2 ip3 ip4 N
+    IFS="./" read -r ip1 ip2 ip3 ip4 N <<< "$cidr"
+    ip_total=$((ip1 * 256 ** 3 + ip2 * 256 ** 2 + ip3 * 256 + ip4))
+    [[ $((ip_total % 2**(32-N))) == 0 ]] && return 0 || return 1
   else
     return 1
   fi
@@ -433,6 +453,23 @@ valid_ip4() {
 echo "$ip" | grep -qP '^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$' && return 0 || return 1
 }
 
+valid_ip4_cidr_mask() {
+  # Verify there is a backslash in the string
+  echo "$1" | grep -qP "^[^/]+/[^/]+$" || return 1
+
+  local cidr
+  local ip
+
+  cidr=$(echo "$1" | sed 's/.*\///')
+  ip=$(echo "$1" | sed 's/\/.*//' )
+
+  if valid_ip4 "$ip"; then
+    [[ $cidr =~ ^([0-9]|[1-2][0-9]|3[0-2])$ ]] && return 0 || return 1
+  else
+    return 1
+  fi
+}
+
 valid_int() {
   local num=$1
   local min=${2:-1}
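The alignment arithmetic that valid_cidr() now performs, packing the dotted quad into a 32-bit integer and requiring it to be an exact multiple of 2^(32-N), is the same condition Python's ipaddress module enforces in strict mode. A quick cross-check, offered as an illustration rather than as part of the commit:

```
import ipaddress

def aligned(cidr: str) -> bool:
    # Mirror the shell arithmetic: the address as a 32-bit integer must
    # be divisible by the block size 2**(32 - N).
    ip, n = cidr.split('/')
    o1, o2, o3, o4 = (int(x) for x in ip.split('.'))
    ip_total = o1 * 256 ** 3 + o2 * 256 ** 2 + o3 * 256 + o4
    return ip_total % 2 ** (32 - int(n)) == 0

for cidr in ('10.10.0.0/16', '10.10.0.1/16'):
    try:
        ipaddress.ip_network(cidr)  # strict=True raises on host bits
        stdlib_ok = True
    except ValueError:
        stdlib_ok = False
    assert aligned(cidr) == stdlib_ok
    print(cidr, 'aligned' if stdlib_ok else 'not aligned')
```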
salt/common/tools/sbin/so-deny (new executable file, 213 lines)
@@ -0,0 +1,213 @@
+#!/usr/bin/env python3
+
+# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program. If not, see <http://www.gnu.org/licenses/>.
+
+import ipaddress
+import textwrap
+import os
+import subprocess
+import sys
+import argparse
+import re
+from lxml import etree as ET
+from xml.dom import minidom
+
+
+LOCAL_SALT_DIR='/opt/so/saltstack/local'
+WAZUH_CONF='/nsm/wazuh/etc/ossec.conf'
+VALID_ROLES = {
+    'a': { 'role': 'analyst','desc': 'Analyst - 80/tcp, 443/tcp' },
+    'b': { 'role': 'beats_endpoint', 'desc': 'Logstash Beat - 5044/tcp' },
+    'e': { 'role': 'elasticsearch_rest', 'desc': 'Elasticsearch REST API - 9200/tcp' },
+    'f': { 'role': 'strelka_frontend', 'desc': 'Strelka frontend - 57314/tcp' },
+    'o': { 'role': 'osquery_endpoint', 'desc': 'Osquery endpoint - 8090/tcp' },
+    's': { 'role': 'syslog', 'desc': 'Syslog device - 514/tcp/udp' },
+    'w': { 'role': 'wazuh_agent', 'desc': 'Wazuh agent - 1514/tcp/udp' },
+    'p': { 'role': 'wazuh_api', 'desc': 'Wazuh API - 55000/tcp' },
+    'r': { 'role': 'wazuh_authd', 'desc': 'Wazuh registration service - 1515/tcp' }
+}
+
+
+def validate_ip_cidr(ip_cidr: str) -> bool:
+    try:
+        ipaddress.ip_address(ip_cidr)
+    except ValueError:
+        try:
+            ipaddress.ip_network(ip_cidr)
+        except ValueError:
+            return False
+    return True
+
+
+def role_prompt() -> str:
+    print()
+    print('Choose the role for the IP or Range you would like to deny')
+    print()
+    for role in VALID_ROLES:
+        print(f'[{role}] - {VALID_ROLES[role]["desc"]}')
+    print()
+    role = input('Please enter your selection: ')
+    if role in VALID_ROLES.keys():
+        return VALID_ROLES[role]['role']
+    else:
+        print(f'Invalid role \'{role}\', please try again.', file=sys.stderr)
+        sys.exit(1)
+
+
+def ip_prompt() -> str:
+    ip = input('Enter a single ip address or range to deny (ex: 10.10.10.10 or 10.10.0.0/16): ')
+    if validate_ip_cidr(ip):
+        return ip
+    else:
+        print(f'Invalid IP address or CIDR block \'{ip}\', please try again.', file=sys.stderr)
+        sys.exit(1)
+
+
+def wazuh_enabled() -> bool:
+    for file in os.listdir(f'{LOCAL_SALT_DIR}/pillar'):
+        with open(file, 'r') as pillar:
+            if 'wazuh: 1' in pillar.read():
+                return True
+    return False
+
+
+def root_to_str(root: ET.ElementTree) -> str:
+    xml_str = ET.tostring(root, encoding='unicode', method='xml').replace('\n', '')
+    xml_str = re.sub(r'(?:(?<=>) *)', '', xml_str)
+
+    # Remove specific substrings to better format comments on intial parse/write
+    xml_str = re.sub(r' -', '', xml_str)
+    xml_str = re.sub(r' -->', ' -->', xml_str)
+
+    dom = minidom.parseString(xml_str)
+    return dom.toprettyxml(indent="  ")
+
+
+def rem_wl(ip):
+    parser = ET.XMLParser(remove_blank_text=True)
+    with open(WAZUH_CONF, 'rb') as wazuh_conf:
+        tree = ET.parse(wazuh_conf, parser)
+    root = tree.getroot()
+
+    global_elems = root.findall(f"global/white_list[. = '{ip}']/..")
+    if len(global_elems) > 0:
+        for g_elem in global_elems:
+            ge_index = list(root).index(g_elem)
+            if ge_index > 0 and root[list(root).index(g_elem) - 1].tag == ET.Comment:
+                root.remove(root[ge_index - 1])
+            root.remove(g_elem)
+
+    with open(WAZUH_CONF, 'w') as out:
+        out.write(root_to_str(root))
+
+
+def apply(role: str, ip: str) -> int:
+    firewall_cmd = ['so-firewall', 'excludehost', role, ip]
+    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'firewall', 'queue=True']
+    restart_wazuh_cmd = ['so-wazuh-restart']
+    print(f'Removing {ip} from the {role} role. This can take a few seconds...')
+    cmd = subprocess.run(firewall_cmd)
+    if cmd.returncode == 0:
+        cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL)
+    else:
+        return cmd.returncode
+    if cmd.returncode == 0:
+        if wazuh_enabled and role=='analyst':
+            try:
+                rem_wl(ip)
+                print(f'Removed whitelist entry for {ip} from {WAZUH_CONF}', file=sys.stderr)
+            except Exception as e:
+                print(f'Failed to remove whitelist entry for {ip} from {WAZUH_CONF}', file=sys.stderr)
+                print(e)
+                return 1
+            print('Restarting OSSEC Server...')
+            cmd = subprocess.run(restart_wazuh_cmd)
+        else:
+            return cmd.returncode
+    else:
+        print(f'Commmand \'{" ".join(salt_cmd)}\' failed.', file=sys.stderr)
+        return cmd.returncode
+    if cmd.returncode != 0:
+        print('Failed to restart OSSEC server.')
+    return cmd.returncode
+
+
+def main():
+    if os.geteuid() != 0:
+        print('You must run this script as root', file=sys.stderr)
+        sys.exit(1)
+
+    main_parser = argparse.ArgumentParser(
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+        epilog=textwrap.dedent(f'''\
+            additional information:
+              To use this script in interactive mode call it with no arguments
+            '''
+        ))
+
+    group = main_parser.add_argument_group(title='roles')
+    group.add_argument('-a', dest='roles', action='append_const', const=VALID_ROLES['a']['role'], help="Analyst - 80/tcp, 443/tcp")
+    group.add_argument('-b', dest='roles', action='append_const', const=VALID_ROLES['b']['role'], help="Logstash Beat - 5044/tcp")
+    group.add_argument('-e', dest='roles', action='append_const', const=VALID_ROLES['e']['role'], help="Elasticsearch REST API - 9200/tcp")
+    group.add_argument('-f', dest='roles', action='append_const', const=VALID_ROLES['f']['role'], help="Strelka frontend - 57314/tcp")
+    group.add_argument('-o', dest='roles', action='append_const', const=VALID_ROLES['o']['role'], help="Osquery endpoint - 8090/tcp")
+    group.add_argument('-s', dest='roles', action='append_const', const=VALID_ROLES['s']['role'], help="Syslog device - 514/tcp/udp")
+    group.add_argument('-w', dest='roles', action='append_const', const=VALID_ROLES['w']['role'], help="Wazuh agent - 1514/tcp/udp")
+    group.add_argument('-p', dest='roles', action='append_const', const=VALID_ROLES['p']['role'], help="Wazuh API - 55000/tcp")
+    group.add_argument('-r', dest='roles', action='append_const', const=VALID_ROLES['r']['role'], help="Wazuh registration service - 1515/tcp")
+
+    ip_g = main_parser.add_argument_group(title='allow')
+    ip_g.add_argument('-i', help="IP or CIDR block to disallow connections from, requires at least one role argument", metavar='', dest='ip')
+
+    args = main_parser.parse_args(sys.argv[1:])
+
+    if args.roles is None:
+        role = role_prompt()
+        ip = ip_prompt()
+        try:
+            return_code = apply(role, ip)
+        except Exception as e:
+            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
+            return_code = e.errno
+        sys.exit(return_code)
+    elif args.roles is not None and args.ip is None:
+        if os.environ.get('IP') is None:
+            main_parser.print_help()
+            sys.exit(1)
+        else:
+            args.ip = os.environ['IP']
+
+    if validate_ip_cidr(args.ip):
+        try:
+            for role in args.roles:
+                return_code = apply(role, args.ip)
+                if return_code > 0:
+                    break
+        except Exception as e:
+            print(f'Unexpected exception occurred: {e}', file=sys.stderr)
+            return_code = e.errno
+    else:
+        print(f'Invalid IP address or CIDR block \'{args.ip}\', please try again.', file=sys.stderr)
+        return_code = 1
+
+    sys.exit(return_code)
+
+
+if __name__ == '__main__':
+    try:
+        main()
+    except KeyboardInterrupt:
+        sys.exit(1)
@@ -70,7 +70,7 @@ do
done

docker_exec(){
  CMD="docker exec -it so-elastalert elastalert-test-rule /opt/elastalert/rules/$RULE_NAME --config /opt/config/elastalert_config.yaml $OPTIONS"
  CMD="docker exec -it so-elastalert elastalert-test-rule /opt/elastalert/rules/$RULE_NAME --config /opt/elastalert/config.yaml $OPTIONS"
  if [ "${RESULTS_TO_LOG,,}" = "y" ] ; then
    $CMD > "$FILE_SAVE_LOCATION"
  else

155
salt/common/tools/sbin/so-elastic-auth-password-reset
Normal file
@@ -0,0 +1,155 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

source $(dirname $0)/so-common
require_manager

user=$1
elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
elasticAuthPillarFile=${ELASTIC_AUTH_PILLAR_FILE:-/opt/so/saltstack/local/pillar/elasticsearch/auth.sls}

if [[ $# -ne 1 ]]; then
  echo "Usage: $0 <user>"
  echo ""
  echo " where <user> is one of the following:"
  echo ""
  echo " all: Reset the password for the so_elastic, so_kibana, so_logstash, so_beats, and so_monitor users"
  echo " so_elastic: Reset the password for the so_elastic user"
  echo " so_kibana: Reset the password for the so_kibana user"
  echo " so_logstash: Reset the password for the so_logstash user"
  echo " so_beats: Reset the password for the so_beats user"
  echo " so_monitor: Reset the password for the so_monitor user"
  echo ""
  exit 1
fi

# function to create a lock so that the so-user sync cronjob can't run while this is running
function lock() {
  # Obtain file descriptor lock
  exec 99>/var/tmp/so-user.lock || fail "Unable to create lock descriptor; if the system was not shutdown gracefully you may need to remove /var/tmp/so-user.lock manually."
  flock -w 10 99 || fail "Another process is using so-user; if the system was not shutdown gracefully you may need to remove /var/tmp/so-user.lock manually."
  trap 'rm -f /var/tmp/so-user.lock' EXIT
}

function unlock() {
  rm -f /var/tmp/so-user.lock
}

function fail() {
  msg=$1
  echo "$msg"
  exit 1
}

function removeSingleUserPass() {
  local user=$1
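  # The sed below finds the 'user: <name>' line, pulls the following line into the
  # pattern space (N), and deletes both when that next line is the 'pass:' entry,
  # dropping the stored password pair from the auth pillar.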
  sed -i '/user: '"${user}"'/{N;/pass: /d}' "${elasticAuthPillarFile}"
}

function removeAllUserPass() {
  local userList=("so_elastic" "so_kibana" "so_logstash" "so_beats" "so_monitor")

  for u in ${userList[@]}; do
    removeSingleUserPass "$u"
  done
}

function removeElasticUsersFile() {
  rm -f "$elasticUsersFile"
}

function createElasticAuthPillar() {
  salt-call state.apply elasticsearch.auth queue=True
}

# this will disable highstate to prevent a highstate from starting while the script is running
# it also disables salt.minion-state-apply-test so that the so-salt-minion-check cronjob doesn't restart the salt-minion service in the meantime
function disableSaltStates() {
  printf "\nDisabling salt.minion-state-apply-test and highstate from running.\n\n"
  salt-call state.disable salt.minion-state-apply-test
  salt-call state.disable highstate
}

function enableSaltStates() {
  printf "\nEnabling salt.minion-state-apply-test and highstate.\n\n"
  salt-call state.enable salt.minion-state-apply-test
  salt-call state.enable highstate
}

function killAllSaltJobs() {
  printf "\nKilling all running salt jobs.\n\n"
  salt-call saltutil.kill_all_jobs
}

function soUserSync() {
  # apply this state to update /opt/so/saltstack/local/salt/elasticsearch/curl.config on the manager
  salt-call state.sls_id elastic_curl_config_distributed manager queue=True
  salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-node or G@role:so-heavynode' saltutil.kill_all_jobs
  # apply this state to get the curl.config
  salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-node or G@role:so-heavynode' state.sls_id elastic_curl_config common queue=True
  $(dirname $0)/so-user sync
  printf "\nApplying logstash state to the appropriate nodes.\n\n"
  salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-node or G@role:so-heavynode' state.apply logstash queue=True
  printf "\nApplying filebeat state to the appropriate nodes.\n\n"
  salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-node or G@role:so-heavynode or G@role:so-sensor or G@role:so-fleet' state.apply filebeat queue=True
  printf "\nApplying kibana state to the appropriate nodes.\n\n"
  salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch' state.apply kibana queue=True
  printf "\nApplying curator state to the appropriate nodes.\n\n"
  salt -C 'G@role:so-standalone or G@role:so-eval or G@role:so-import or G@role:so-manager or G@role:so-managersearch or G@role:so-node or G@role:so-heavynode' state.apply curator queue=True
}

function highstateManager() {
  killAllSaltJobs
  printf "\nRunning highstate on the manager to finalize password reset.\n\n"
  salt-call state.highstate -linfo queue=True
}

case "${user}" in

  so_elastic | so_kibana | so_logstash | so_beats | so_monitor)
    lock
    killAllSaltJobs
    disableSaltStates
    removeSingleUserPass "$user"
    createElasticAuthPillar
    removeElasticUsersFile
    unlock
    soUserSync
    enableSaltStates
    highstateManager
    ;;

  all)
    lock
    killAllSaltJobs
    disableSaltStates
    removeAllUserPass
    createElasticAuthPillar
    removeElasticUsersFile
    unlock
    soUserSync
    enableSaltStates
    highstateManager
    ;;

  *)
    fail "Unsupported user: $user"
    ;;

esac

exit 0
57
salt/common/tools/sbin/so-elasticsearch-roles-load
Executable file
@@ -0,0 +1,57 @@
#!/bin/bash
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{%- set mainint = salt['pillar.get']('host:mainint') %}
{%- set MYIP = salt['grains.get']('ip_interfaces:' ~ mainint)[0] %}

default_conf_dir=/opt/so/conf
ELASTICSEARCH_HOST="{{ MYIP }}"
ELASTICSEARCH_PORT=9200

# Define a default directory to load roles from
ELASTICSEARCH_ROLES="$default_conf_dir/elasticsearch/roles/"

# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
  {{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
  if [ $? -eq 0 ]; then
    ELASTICSEARCH_CONNECTED="yes"
    echo "connected!"
    break
  else
    ((COUNT+=1))
    sleep 1
    echo -n "."
  fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
  echo
  echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
  echo
fi

cd ${ELASTICSEARCH_ROLES}

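# Each file in the roles directory becomes an Elasticsearch role named after the
# filename (minus its extension), PUT to the _security/role API via so-elasticsearch-query.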
echo "Loading templates..."
|
||||
for role in *; do
|
||||
name=$(echo "$role" | cut -d. -f1)
|
||||
so-elasticsearch-query _security/role/$name -XPUT -d @"$role"
|
||||
done
|
||||
|
||||
cd - >/dev/null
|
||||
@@ -54,7 +54,7 @@ PIPELINES=$({{ ELASTICCURL }} -sk https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_
if [[ "$PIPELINES" -lt 5 ]]; then
  echo "Setting up ingest pipeline(s)"

  for MODULE in activemq apache auditd aws azure barracuda bluecoat cef checkpoint cisco coredns crowdstrike cyberark cylance elasticsearch envoyproxy f5 fortinet gcp google_workspace googlecloud gsuite haproxy ibmmq icinga iis imperva infoblox iptables juniper kafka kibana logstash microsoft misp mongodb mssql mysql nats netscout nginx o365 okta osquery panw postgresql rabbitmq radware redis santa snort snyk sonicwall sophos squid suricata system tomcat traefik zeek zscaler
  for MODULE in activemq apache auditd aws azure barracuda bluecoat cef checkpoint cisco coredns crowdstrike cyberark cylance elasticsearch envoyproxy f5 fortinet gcp google_workspace googlecloud gsuite haproxy ibmmq icinga iis imperva infoblox iptables juniper kafka kibana logstash microsoft mongodb mssql mysql nats netscout nginx o365 okta osquery panw postgresql rabbitmq radware redis santa snort snyk sonicwall sophos squid suricata system threatintel tomcat traefik zeek zscaler
  do
    echo "Loading $MODULE"
    docker exec -i so-filebeat filebeat setup modules -pipelines -modules $MODULE -c $FB_MODULE_YML

@@ -35,6 +35,7 @@ def showUsage(options, args):
    print('')
    print(' General commands:')
    print(' help - Prints this usage information.')
    print(' apply - Apply the firewall state.')
    print('')
    print(' Host commands:')
    print(' listhostgroups - Lists the known host groups.')
@@ -66,11 +67,11 @@ def checkDefaultPortsOption(options):

def checkApplyOption(options):
    if "--apply" in options:
        return apply()
        return apply(None, None)

def loadYaml(filename):
    file = open(filename, "r")
    return yaml.load(file.read())
    return yaml.safe_load(file.read())
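    # yaml.safe_load only constructs plain data types, so arbitrary YAML tags
    # cannot trigger code execution; a sensible default for config files like these.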

def writeYaml(filename, content):
    file = open(filename, "w")
@@ -328,7 +329,7 @@ def removehost(options, args):
    code = checkApplyOption(options)
    return code

def apply():
def apply(options, args):
    proc = subprocess.run(['salt-call', 'state.apply', 'firewall', 'queue=True'])
    return proc.returncode

@@ -356,7 +357,8 @@ def main():
    "addport": addport,
    "removeport": removeport,
    "addhostgroup": addhostgroup,
    "addportgroup": addportgroup
    "addportgroup": addportgroup,
    "apply": apply
  }

  code=1

@@ -2,11 +2,16 @@

#so-fleet-setup $FleetEmail $FleetPassword

. /usr/sbin/so-common

if [[ $# -ne 2 ]] ; then
  echo "Username or Password was not set - exiting now."
  exit 1
fi

USER_EMAIL=$1
USER_PW=$2

# Checking to see if required containers are started...
if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
  echo "Starting Docker Containers..."
@@ -17,8 +22,16 @@ fi

docker exec so-fleet fleetctl config set --address https://127.0.0.1:8080 --tls-skip-verify --url-prefix /fleet
docker exec so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://127.0.0.1:8080/fleet)" != "301" ]]; do sleep 5; done'
docker exec so-fleet fleetctl setup --email $1 --password $2

# Create Security Onion Fleet Service Account + Setup Fleet
FLEET_SA_EMAIL=$(lookup_pillar_secret fleet_sa_email)
FLEET_SA_PW=$(lookup_pillar_secret fleet_sa_password)
docker exec so-fleet fleetctl setup --email $FLEET_SA_EMAIL --password $FLEET_SA_PW --name SO_ServiceAccount --org-name SO

# Create User Account
echo "$USER_PW" | so-fleet-user-add "$USER_EMAIL"

# Import Packs & Configs
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/Windows/osquery.yaml
docker exec so-fleet fleetctl apply -f /packs/so/so-default.yml

@@ -18,7 +18,7 @@
. /usr/sbin/so-common

usage() {
  echo "Usage: $0 <new-user-name>"
  echo "Usage: $0 <new-user-email>"
  echo ""
  echo "Adds a new user to Fleet. The new password will be read from STDIN."
  exit 1
@@ -28,37 +28,42 @@ if [ $# -ne 1 ]; then
  usage
fi

USER=$1

MYSQL_PASS=$(lookup_pillar_secret mysql)
FLEET_IP=$(lookup_pillar fleet_ip)
FLEET_USER=$USER
USER_EMAIL=$1
FLEET_SA_EMAIL=$(lookup_pillar_secret fleet_sa_email)
FLEET_SA_PW=$(lookup_pillar_secret fleet_sa_password)
MYSQL_PW=$(lookup_pillar_secret mysql)

# Read password for new user from stdin
test -t 0
if [[ $? == 0 ]]; then
  echo "Enter new password:"
fi
read -rs FLEET_PASS
read -rs USER_PASS

if ! check_password "$FLEET_PASS"; then
  echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
  exit 2
fi
check_password_and_exit "$USER_PASS"

# Config fleetctl & login with the SO Service Account
CONFIG_OUTPUT=$(docker exec so-fleet fleetctl config set --address https://127.0.0.1:8080 --tls-skip-verify --url-prefix /fleet 2>&1 )
SALOGIN_OUTPUT=$(docker exec so-fleet fleetctl login --email $FLEET_SA_EMAIL --password $FLEET_SA_PW 2>&1)

FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
if [[ $? -ne 0 ]]; then
  echo "Failed to generate Fleet password hash"
  echo "Unable to add user to Fleet; Fleet Service account login failed"
  echo "$SALOGIN_OUTPUT"
  exit 2
fi

MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
  "INSERT INTO users (password,salt,username,email,admin,enabled) VALUES ('$FLEET_HASH','','$FLEET_USER','$FLEET_USER',1,1)" 2>&1)
# Create New User
CREATE_OUTPUT=$(docker exec so-fleet fleetctl user create --email $USER_EMAIL --name $USER_EMAIL --password $USER_PASS --global-role admin 2>&1)

if [[ $? -eq 0 ]]; then
  echo "Successfully added user to Fleet"
else
  echo "Unable to add user to Fleet; user might already exist"
  echo "$MYSQL_OUTPUT"
  echo "$CREATE_OUTPUT"
  exit 2
fi

# Disable forced password reset
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PW fleet -e \
  "UPDATE users SET admin_forced_password_reset = 0 WHERE email = '$USER_EMAIL'" 2>&1)
56
salt/common/tools/sbin/so-fleet-user-delete
Normal file
@@ -0,0 +1,56 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

usage() {
  echo "Usage: $0 <user-email>"
  echo ""
  echo "Deletes a user in Fleet"
  exit 1
}

if [ $# -ne 1 ]; then
  usage
fi

USER_EMAIL=$1
FLEET_SA_EMAIL=$(lookup_pillar_secret fleet_sa_email)
FLEET_SA_PW=$(lookup_pillar_secret fleet_sa_password)

# Config fleetctl & login with the SO Service Account
CONFIG_OUTPUT=$(docker exec so-fleet fleetctl config set --address https://127.0.0.1:8080 --tls-skip-verify --url-prefix /fleet 2>&1 )
SALOGIN_OUTPUT=$(docker exec so-fleet fleetctl login --email $FLEET_SA_EMAIL --password $FLEET_SA_PW 2>&1)

if [[ $? -ne 0 ]]; then
  echo "Unable to delete user from Fleet; Fleet Service account login failed"
  echo "$SALOGIN_OUTPUT"
  exit 2
fi

# Delete User
DELETE_OUTPUT=$(docker exec so-fleet fleetctl user delete --email $USER_EMAIL 2>&1)

if [[ $? -eq 0 ]]; then
  echo "Successfully deleted user from Fleet"
else
  echo "Unable to delete user from Fleet"
  echo "$DELETE_OUTPUT"
  exit 2
fi


75
salt/common/tools/sbin/so-fleet-user-update
Executable file
@@ -0,0 +1,75 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

usage() {
  echo "Usage: $0 <user-name>"
  echo ""
  echo "Update password for an existing Fleet user. The new password will be read from STDIN."
  exit 1
}

if [ $# -ne 1 ]; then
  usage
fi

USER=$1

MYSQL_PASS=$(lookup_pillar_secret mysql)
FLEET_IP=$(lookup_pillar fleet_ip)
FLEET_USER=$USER

# test existence of user
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
  "SELECT count(1) FROM users WHERE email='$FLEET_USER'" 2>/dev/null | tail -1)
if [[ $? -ne 0 ]] || [[ $MYSQL_OUTPUT -ne 1 ]] ; then
  echo "Test for email [${FLEET_USER}] failed"
  echo " expected 1 hit in the users database, got $MYSQL_OUTPUT hit(s)."
  echo "Unable to update Fleet user password."
  exit 2
fi

# Read password for new user from stdin
test -t 0
if [[ $? == 0 ]]; then
  echo "Enter new password:"
fi
read -rs FLEET_PASS

if ! check_password "$FLEET_PASS"; then
  echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
  exit 2
fi

FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_PASS'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
if [[ $? -ne 0 ]]; then
  echo "Failed to generate Fleet password hash"
  exit 2
fi


MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
  "UPDATE users SET password='$FLEET_HASH', salt='' where email='$FLEET_USER'" 2>&1)

if [[ $? -eq 0 ]]; then
  echo "Successfully updated Fleet user password"
else
  echo "Unable to update Fleet user password"
  echo "$MYSQL_OUTPUT"
  exit 2
fi
17
salt/common/tools/sbin/so-grafana-dashboard-folder-delete
Executable file
@@ -0,0 +1,17 @@
# this script is used to delete the default Grafana dashboard folders that existed prior to Grafana dashboard and Salt management changes in 2.3.70

folders=$(curl -X GET http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders | jq -r '.[] | @base64')
delfolder=("Manager" "Manager Search" "Sensor Nodes" "Search Nodes" "Standalone" "Eval Mode")

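# Each folder object is round-tripped through base64 so the JSON survives shell
# word-splitting in the loop below; title and uid are decoded back out with jq.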
for row in $folders; do
  title=$(echo ${row} | base64 --decode | jq -r '.title')
  uid=$(echo ${row} | base64 --decode | jq -r '.uid')

  if [[ " ${delfolder[@]} " =~ " ${title} " ]]; then
    curl -X DELETE http://admin:{{salt['pillar.get']('secrets:grafana_admin')}}@localhost:3000/api/folders/$uid
  fi
done

echo "so-grafana-dashboard-folder-delete has been run to delete default Grafana dashboard folders that existed prior to 2.3.70" > /opt/so/state/so-grafana-dashboard-folder-delete-complete

exit 0
@@ -17,6 +17,7 @@

# NOTE: This script depends on so-common
IMAGEREPO=security-onion-solutions
STATUS_CONF='/opt/so/conf/so-status/so-status.conf'

# shellcheck disable=SC2120
container_list() {
@@ -138,6 +139,11 @@ update_docker_containers() {
  cat $SIGNPATH/KEYS | gpg --import - >> "$LOG_FILE" 2>&1
fi

# If downloading for soup, check if any optional images need to be pulled
if [[ $CURLTYPE == 'soup' ]]; then
  grep -q "so-logscan" "$STATUS_CONF" && TRUSTED_CONTAINERS+=("so-logscan")
fi

# Download the containers from the interwebs
for i in "${TRUSTED_CONTAINERS[@]}"
do

58
salt/common/tools/sbin/so-image-pull
Executable file
@@ -0,0 +1,58 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common
. /usr/sbin/so-image-common

usage() {
  read -r -d '' message <<- EOM
	usage: so-image-pull [-h] IMAGE [IMAGE ...]

	positional arguments:
	  IMAGE One or more 'so-' prefixed images to download and verify.

	optional arguments:
	  -h, --help Show this help message and exit.
	EOM
  echo "$message"
  exit 1
}

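# Strip --quiet/-q out of the positional parameters, remembering that it was seen;
# every other argument is pushed back onto $@ unchanged.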
for arg; do
  shift
  [[ "$arg" = "--quiet" || "$arg" = "-q" ]] && quiet=true && continue
  set -- "$@" "$arg"
done

if [[ $# -eq 0 || $# -gt 1 ]] || [[ $1 == '-h' || $1 == '--help' ]]; then
  usage
fi

TRUSTED_CONTAINERS=("$@")
set_version

for image in "${TRUSTED_CONTAINERS[@]}"; do
  if ! docker images | grep "$image" | grep ":5000" | grep -q "$VERSION"; then
    if [[ $quiet == true ]]; then
      update_docker_containers "$image" "" "" "/dev/null"
    else
      update_docker_containers "$image" "" "" ""
    fi
  else
    echo "$image:$VERSION image exists."
  fi
done
176
salt/common/tools/sbin/so-import-evtx
Executable file
@@ -0,0 +1,176 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

{%- set MANAGER = salt['grains.get']('master') %}
{%- set VERSION = salt['pillar.get']('global:soversion') %}
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
{%- set MANAGERIP = salt['pillar.get']('global:managerip') -%}
{%- set URLBASE = salt['pillar.get']('global:url_base') %}
{% set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', '') %}
{% set ES_PW = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %}

INDEX_DATE=$(date +'%Y.%m.%d')
RUNID=$(cat /dev/urandom | tr -dc 'a-z0-9' | fold -w 8 | head -n 1)
LOG_FILE=/nsm/import/evtx-import.log

. /usr/sbin/so-common

function usage {
  cat << EOF
Usage: $0 <evtx-file-1> [evtx-file-2] [evtx-file-*]

Imports one or more evtx files into Security Onion. The evtx files will be analyzed and made available for review in the Security Onion toolset.
EOF
}


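# evtx2es() runs two containers: the first indexes the EVTX into the so-beats-<date>
# index through the import.wel ingest pipeline; the second recomputes the oldest/newest
# event timestamps used later to build the Hunt hyperlink.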
function evtx2es() {
  EVTX=$1
  HASH=$2

  ES_PW=$(lookup_pillar "auth:users:so_elastic_user:pass" "elasticsearch")
  ES_USER=$(lookup_pillar "auth:users:so_elastic_user:user" "elasticsearch")

  docker run --rm \
    -v "$EVTX:/tmp/$RUNID.evtx" \
    --entrypoint evtx2es \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} \
    --host {{ MANAGERIP }} --scheme https \
    --index so-beats-$INDEX_DATE --pipeline import.wel \
    --login $ES_USER --pwd $ES_PW \
    "/tmp/$RUNID.evtx" >> $LOG_FILE 2>&1

  docker run --rm \
    -v "$EVTX:/tmp/import.evtx" \
    -v "/nsm/import/evtx-end_newest:/tmp/newest" \
    -v "/nsm/import/evtx-start_oldest:/tmp/oldest" \
    --entrypoint '/evtx_calc_timestamps.sh' \
    {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }}
}

# if no parameters supplied, display usage
if [ $# -eq 0 ]; then
  usage
  exit 1
fi

# ensure this is a Manager node
require_manager

# verify that all parameters are files
for i in "$@"; do
  if ! [ -f "$i" ]; then
    usage
    echo "\"$i\" is not a valid file!"
    exit 2
  fi
done

# track if we have any valid or invalid evtx
INVALID_EVTXS="no"
VALID_EVTXS="no"

# track oldest start and newest end so that we can generate the Kibana search hyperlink at the end
START_OLDEST="2050-12-31"
END_NEWEST="1971-01-01"

touch /nsm/import/evtx-start_oldest
touch /nsm/import/evtx-end_newest

echo $START_OLDEST > /nsm/import/evtx-start_oldest
echo $END_NEWEST > /nsm/import/evtx-end_newest

# paths must be quoted in case they include spaces
for EVTX in "$@"; do
  EVTX=$(/usr/bin/realpath "$EVTX")
  echo "Processing Import: ${EVTX}"

  # generate a unique hash to assist with dedupe checks
  HASH=$(md5sum "${EVTX}" | awk '{ print $1 }')
  HASH_DIR=/nsm/import/${HASH}
  echo "- assigning unique identifier to import: $HASH"

  if [ -d $HASH_DIR ]; then
    echo "- this EVTX has already been imported; skipping"
    INVALID_EVTXS="yes"
  else
    VALID_EVTXS="yes"

    EVTX_DIR=$HASH_DIR/evtx
    mkdir -p $EVTX_DIR

    # import evtx and write them to import ingest pipeline
    echo "- importing logs to Elasticsearch..."
    evtx2es "${EVTX}" $HASH

    # compare $START to $START_OLDEST
    START=$(cat /nsm/import/evtx-start_oldest)
    START_COMPARE=$(date -d $START +%s)
    START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s)
    if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then
      START_OLDEST=$START
    fi

    # compare $ENDNEXT to $END_NEWEST
    END=$(cat /nsm/import/evtx-end_newest)
    ENDNEXT=`date +%Y-%m-%d --date="$END 1 day"`
    ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s)
    END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s)
    if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then
      END_NEWEST=$ENDNEXT
    fi

    cp -f "${EVTX}" "${EVTX_DIR}"/data.evtx
    chmod 644 "${EVTX_DIR}"/data.evtx

  fi # end of valid evtx

  echo

done # end of for-loop processing evtx files

# remove temp files
echo "Cleaning up:"
for TEMP_EVTX in ${TEMP_EVTXS[@]}; do
  echo "- removing temporary evtx $TEMP_EVTX"
  rm -f $TEMP_EVTX
done

# output final messages
if [ "$INVALID_EVTXS" = "yes" ]; then
  echo
  echo "Please note! One or more evtx was invalid! You can scroll up to see which ones were invalid."
fi

START_OLDEST_FORMATTED=`date +%Y-%m-%d --date="$START_OLDEST"`
START_OLDEST_SLASH=$(echo $START_OLDEST_FORMATTED | sed -e 's/-/%2F/g')
END_NEWEST_SLASH=$(echo $END_NEWEST | sed -e 's/-/%2F/g')

if [ "$VALID_EVTXS" = "yes" ]; then
  cat << EOF

Import complete!

You can use the following hyperlink to view data in the time range of your import. You can triple-click to quickly highlight the entire hyperlink and you can then copy it into your browser:
https://{{ URLBASE }}/#/hunt?q=import.id:${RUNID}%20%7C%20groupby%20event.module%20event.dataset&t=${START_OLDEST_SLASH}%2000%3A00%3A00%20AM%20-%20${END_NEWEST_SLASH}%2000%3A00%3A00%20AM&z=UTC

or you can manually set your Time Range to be (in UTC):
From: $START_OLDEST_FORMATTED To: $END_NEWEST

Please note that it may take 30 seconds or more for events to appear in Hunt.
EOF
fi
0
salt/common/tools/sbin/so-influxdb-drop-autogen
Normal file → Executable file
@@ -8,9 +8,9 @@ fi

echo "This tool will update a manager's IP address to the new IP assigned to the management network interface."

echo
echo ""
echo "WARNING: This tool is still undergoing testing, use at your own risk!"
echo
echo ""

if [ -z "$OLD_IP" ]; then
  OLD_IP=$(lookup_pillar "managerip")
@@ -27,7 +27,7 @@ if [ -z "$NEW_IP" ]; then
  NEW_IP=$(ip -4 addr list $iface | grep inet | cut -d' ' -f6 | cut -d/ -f1)

  if [ -z "$NEW_IP" ]; then
    fail "Unable to detect new IP on interface $iface. "
    fail "Unable to detect new IP on interface $iface."
  fi

  echo "Detected new IP $NEW_IP on interface $iface."
@@ -39,9 +39,9 @@ fi

echo "About to change old IP $OLD_IP to new IP $NEW_IP."

echo
echo ""
read -n 1 -p "Would you like to continue? (y/N) " CONTINUE
echo
echo ""

if [ "$CONTINUE" == "y" ]; then
  for file in $(grep -rlI $OLD_IP /opt/so/saltstack /etc); do
@@ -49,6 +49,11 @@ if [ "$CONTINUE" == "y" ]; then
    sed -i "s|$OLD_IP|$NEW_IP|g" $file
  done

  echo "Granting MySQL root user permissions on $NEW_IP"
  docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'$NEW_IP' IDENTIFIED BY '$(lookup_pillar_secret 'mysql')' WITH GRANT OPTION;" &> /dev/null
  echo "Removing MySQL root user from $OLD_IP"
  docker exec -i so-mysql mysql --user=root --password=$(lookup_pillar_secret 'mysql') -e "DROP USER 'root'@'$OLD_IP';" &> /dev/null

  echo "The IP has been changed from $OLD_IP to $NEW_IP."

  echo

@@ -1,5 +1,5 @@
#!/bin/bash
#

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
@@ -17,42 +17,14 @@

. /usr/sbin/so-common

usage() {
  echo "Usage: $0 <user-name>"
  echo ""
  echo "Enables or disables a user in Fleet"
  exit 1
}
echo $banner
echo "Running kibana.so_savedobjects_defaults Salt state to restore default saved objects."
printf "This could take a while if another Salt job is running. \nRun this command with --force to stop all Salt jobs before proceeding.\n"
echo $banner

if [ $# -ne 2 ]; then
  usage
fi
if [ "$1" = "--force" ]; then
  printf "\nForce-stopping all Salt jobs before proceeding\n\n"
  salt-call saltutil.kill_all_jobs
fi

USER=$1

MYSQL_PASS=$(lookup_pillar_secret mysql)
FLEET_IP=$(lookup_pillar fleet_ip)
FLEET_USER=$USER

case "${2^^}" in
  FALSE | NO | 0)
    FLEET_STATUS=0
    ;;
  TRUE | YES | 1)
    FLEET_STATUS=1
    ;;
  *)
    usage
    ;;
esac

MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PASS fleet -e \
  "UPDATE users SET enabled=$FLEET_STATUS WHERE username='$FLEET_USER'" 2>&1)

if [[ $? -eq 0 ]]; then
  echo "Successfully updated user in Fleet"
else
  echo "Failed to update user in Fleet"
  echo $resp
  exit 2
fi
salt-call state.apply kibana.so_savedobjects_defaults -linfo queue=True
@@ -1,5 +1,5 @@
. /usr/sbin/so-common

{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "{{ ELASTICCURL }}"
## This hackery will be removed if using Elastic Auth ##

@@ -9,5 +9,9 @@ SESSIONCOOKIE=$({{ ELASTICCURL }} -c - -X GET http://localhost:5601/ | grep sid
# Disable certain Features from showing up in the Kibana UI
echo
echo "Setting up default Space:"
{% if HIGHLANDER %}
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["enterpriseSearch"]} ' >> /opt/so/log/kibana/misc.log
{% else %}
{{ ELASTICCURL }} -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","siem","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","fleet"]} ' >> /opt/so/log/kibana/misc.log
{% endif %}
echo

303
salt/common/tools/sbin/so-learn
Executable file
@@ -0,0 +1,303 @@
#!/usr/bin/env python3

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

from itertools import chain
from typing import List

import signal
import sys
import os
import re
import subprocess
import argparse
import textwrap
import yaml
import multiprocessing
import docker
import pty

minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
so_status_conf = '/opt/so/conf/so-status/so-status.conf'
proc: subprocess.CompletedProcess = None

# Temp store of modules, will likely be broken out into salt
def get_learn_modules():
    return {
        'logscan': { 'cpu_period': get_cpu_period(fraction=0.25), 'enabled': False, 'description': 'Scan log files against pre-trained models to alert on anomalies.' }
    }


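# Derive a cpu_period-style value from a fraction of the host's cores;
# hosts with two or fewer cores are given their full core count regardless.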
def get_cpu_period(fraction: float):
    multiplier = 10000

    num_cores = multiprocessing.cpu_count()
    if num_cores <= 2:
        fraction = 1.

    num_used_cores = int(num_cores * fraction)
    cpu_period = num_used_cores * multiplier
    return cpu_period


def sigint_handler(*_):
    print('Exiting gracefully on Ctrl-C')
    if proc is not None: proc.send_signal(signal.SIGINT)
    sys.exit(1)


def find_minion_pillar() -> str:
    regex = '^.*_(manager|managersearch|standalone|import|eval)\.sls$'

    result = []
    for root, _, files in os.walk(minion_pillar_dir):
        for f_minion_id in files:
            if re.search(regex, f_minion_id):
                result.append(os.path.join(root, f_minion_id))

    if len(result) == 0:
        print('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?', file=sys.stderr)
        sys.exit(3)
    elif len(result) > 1:
        res_str = ', '.join(f'"{r}"' for r in result)  # quote each pillar path individually
        print('(This should not happen, the system is in an error state if you see this message.)\n', file=sys.stderr)
        print('More than one manager-type pillar exists, minion id\'s listed below:', file=sys.stderr)
        print(f' {res_str}', file=sys.stderr)
        sys.exit(3)
    else:
        return result[0]


def read_pillar(pillar: str):
    try:
        with open(pillar, 'r') as pillar_file:
            loaded_yaml = yaml.safe_load(pillar_file.read())
            if loaded_yaml is None:
                print(f'Could not parse {pillar}', file=sys.stderr)
                sys.exit(3)
            return loaded_yaml
    except:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)


def write_pillar(pillar: str, content: dict):
    try:
        with open(pillar, 'w') as pillar_file:
            yaml.dump(content, pillar_file, default_flow_style=False)
    except:
        print(f'Could not open {pillar}', file=sys.stderr)
        sys.exit(3)


def mod_so_status(action: str, item: str):
    with open(so_status_conf, 'a+') as conf:
        conf.seek(0)
        containers = conf.readlines()

        if f'so-{item}\n' in containers:
            if action == 'remove': containers.remove(f'so-{item}\n')
            if action == 'add': pass
        else:
            if action == 'remove': pass
            if action == 'add': containers.append(f'so-{item}\n')

        containers = [c_name for c_name in containers if c_name != '\n']  # remove extra newlines

        conf.seek(0)
        conf.truncate(0)
        conf.writelines(containers)


def create_pillar_if_not_exist(pillar:str, content: dict):
    pillar_dict = content

    if pillar_dict.get('learn', {}).get('modules') is None:
        pillar_dict['learn'] = {}
        pillar_dict['learn']['modules'] = get_learn_modules()
        content.update()
        write_pillar(pillar, content)

    return content


def salt_call(module: str):
    salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', f'learn.{module}', 'queue=True']

    print(f' Applying salt state for {module} module...')
    proc = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
    return_code = proc.returncode
    if return_code != 0:
        print(f' [ERROR] Failed to apply salt state for {module} module.')

    return return_code


def pull_image(module: str):
    container_basename = f'so-{module}'

    client = docker.from_env()
    image_list = client.images.list(filters={ 'dangling': False })
    tag_list = list(chain.from_iterable(list(map(lambda x: x.attrs.get('RepoTags'), image_list))))
    basename_match = list(filter(lambda x: f'{container_basename}' in x, tag_list))
    local_registry_match = list(filter(lambda x: ':5000' in x, basename_match))

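    # Only tags from the local registry (':5000') count as already present;
    # anything else means the image still needs to be pulled and verified.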
    if len(local_registry_match) == 0:
        print(f'Pulling and verifying missing image for {module} (may take several minutes) ...')
        pull_command = ['so-image-pull', '--quiet', container_basename]

        proc = subprocess.run(pull_command, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        return_code = proc.returncode
        if return_code != 0:
            print(f'[ERROR] Failed to pull image so-{module}, skipping state.')
    else:
        return_code = 0
    return return_code


def apply(module_list: List):
    return_code = 0
    for module in module_list:
        salt_ret = salt_call(module)
        # Only update return_code if the command returned a non-zero return
        if salt_ret != 0:
            return_code = salt_ret

    return return_code


def check_apply(args: dict):
    if args.apply:
        print('Configuration updated. Applying changes:')
        return apply(args.modules)
    else:
        message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
        answer = input(message)
        while answer.lower() not in [ 'y', 'n', '' ]:
            answer = input(message)
        if answer.lower() in [ 'n', '' ]:
            return 0
        else:
            print('Applying changes:')
            return apply(args.modules)


def enable_disable_modules(args, enable: bool):
    pillar_modules = args.pillar_dict.get('learn', {}).get('modules')
    pillar_mod_names = args.pillar_dict.get('learn', {}).get('modules').keys()

    action_str = 'add' if enable else 'remove'

    if 'all' in args.modules:
        for module, details in pillar_modules.items():
            details['enabled'] = enable
            mod_so_status(action_str, module)
            if enable: pull_image(module)
        args.pillar_dict.update()
        write_pillar(args.pillar, args.pillar_dict)
    else:
        write_needed = False
        for module in args.modules:
            if module in pillar_mod_names:
                if pillar_modules[module]['enabled'] == enable:
                    state_str = 'enabled' if enable else 'disabled'
                    print(f'{module} module already {state_str}.', file=sys.stderr)
                else:
                    if enable and pull_image(module) != 0:
                        continue
                    pillar_modules[module]['enabled'] = enable
                    mod_so_status(action_str, module)
                    write_needed = True
        if write_needed:
            args.pillar_dict.update()
            write_pillar(args.pillar, args.pillar_dict)

    cmd_ret = check_apply(args)
    return cmd_ret


def enable_modules(args):
    return enable_disable_modules(args, enable=True)  # propagate the exit code to main()


def disable_modules(args):
    return enable_disable_modules(args, enable=False)  # propagate the exit code to main()


def list_modules(*_):
    print('Available ML modules:')
    for module, details in get_learn_modules().items():
        print(f' - { module } : {details["description"]}')
    return 0


def main():
    beta_str = 'BETA - SUBJECT TO CHANGE\n'

    apply_help='After ACTION the chosen modules, apply any necessary salt states.'
    enable_apply_help = apply_help.replace('ACTION', 'enabling')
    disable_apply_help = apply_help.replace('ACTION', 'disabling')

    signal.signal(signal.SIGINT, sigint_handler)

    if os.geteuid() != 0:
        print('You must run this script as root', file=sys.stderr)
        sys.exit(1)

    main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)

    subcommand_desc = textwrap.dedent(
        """\
        enable     Enable one or more ML modules.
        disable    Disable one or more ML modules.
        list       List all available ML modules.
        """
    )

    subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')

    module_help_str = 'One or more ML modules, which can be listed using \'so-learn list\'. Use the keyword \'all\' to apply the action to all available modules.'

    enable = subparsers.add_parser('enable')
    enable.set_defaults(func=enable_modules)
    enable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    enable.add_argument('--apply', action='store_const', const=True, required=False, help=enable_apply_help)

    disable = subparsers.add_parser('disable')
    disable.set_defaults(func=disable_modules)
    disable.add_argument('modules', metavar='ML_MODULE', nargs='+', help=module_help_str)
    disable.add_argument('--apply', action='store_const', const=True, required=False, help=disable_apply_help)

    list = subparsers.add_parser('list')
    list.set_defaults(func=list_modules)

    args = main_parser.parse_args(sys.argv[1:])
    args.pillar = find_minion_pillar()
    args.pillar_dict = create_pillar_if_not_exist(args.pillar, read_pillar(args.pillar))

    if hasattr(args, 'func'):
        exit_code = args.func(args)
    else:
        if args.command is None:
            print(beta_str)
            main_parser.print_help()
            sys.exit(0)

    sys.exit(exit_code)


if __name__ == '__main__':
    main()
22
salt/common/tools/sbin/so-playbook-import
Executable file
@@ -0,0 +1,22 @@
#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

ENABLEPLAY=${1:-False}

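# $ENABLEPLAY is interpolated directly into the Python snippet below, so it must be
# a valid Python literal (True/False); it defaults to False when no argument is given.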
docker exec so-soctopus /usr/local/bin/python -c "import playbook; print(playbook.play_import($ENABLEPLAY))"
@@ -17,53 +17,101 @@

. /usr/sbin/so-common

check_lsi_raid() {
  # For use for LSI on Ubuntu
  #MEGA=/opt/MegaRAID/MegeCli/MegaCli64
  #LSIRC=$($MEGA -LDInfo -Lall -aALL | grep Optimal)
  # Open Source Centos
  MEGA=/opt/mega/megasasctl
  LSIRC=$($MEGA | grep optimal)

  if [[ $LSIRC ]]; then
    # Raid is good
    LSIRAID=0
appliance_check() {
{%- if salt['grains.get']('sosmodel', '') %}
  APPLIANCE=1
{%- if grains['sosmodel'] in ['SO2AMI01', 'SO2GCI01', 'SO2AZI01'] %}
  exit 0
{%- endif %}
  DUDEYOUGOTADELL=$(dmidecode |grep Dell)
  if [[ -n $DUDEYOUGOTADELL ]]; then
    APPTYPE=dell
  else
    LSIRAID=1
    APPTYPE=sm
  fi
  mkdir -p /opt/so/log/raid

{%- else %}
  echo "This is not an appliance"
  exit 0
{%- endif %}
}

check_nsm_raid() {
  PERCCLI=$(/opt/raidtools/perccli/perccli64 /c0/v0 show|grep RAID|grep Optl)
  MEGACTL=$(/opt/raidtools/megasasctl |grep optimal)

  if [[ $APPLIANCE == '1' ]]; then
    if [[ -n $PERCCLI ]]; then
      HWRAID=0
    elif [[ -n $MEGACTL ]]; then
      HWRAID=0
    else
      HWRAID=1
    fi

  fi

}

check_boss_raid() {
  MVCLI=$(/usr/local/bin/mvcli info -o vd |grep status |grep functional)

  if [[ -n $DUDEYOUGOTADELL ]]; then
    if [[ -n $MVCLI ]]; then
      BOSSRAID=0
    else
      BOSSRAID=1
    fi
  fi
}

check_software_raid() {
  if [[ -n $DUDEYOUGOTADELL ]]; then
    SWRC=$(grep "_" /proc/mdstat)

    if [[ $SWRC ]]; then
    if [[ -n $SWRC ]]; then
      # RAID is failed in some way
      SWRAID=1
    else
      SWRAID=0
    fi
  fi
}

# This script checks raid status if you use SO appliances

# See if this is an appliance

appliance_check
check_nsm_raid
check_boss_raid
{%- if salt['grains.get']('sosmodel', '') %}
mkdir -p /opt/so/log/raid
{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
#check_boss_raid
{%- if grains['sosmodel'] in ['SOSMN', 'SOSSNNV'] %}
check_software_raid
echo "nsmraid=$SWRAID" > /opt/so/log/raid/status.log
{%- elif grains['sosmodel'] in ['SOS1000F', 'SOS1000', 'SOSSN7200', 'SOS10K', 'SOS4000'] %}
#check_boss_raid
check_lsi_raid
echo "nsmraid=$LSIRAID" > /opt/so/log/raid/status.log
{%- else %}
exit 0
{%- endif %}
{%- else %}
exit 0
{%- endif %}
{%- endif %}

if [[ -n $SWRAID ]]; then
  if [[ $SWRAID == '0' && $BOSSRAID == '0' ]]; then
    RAIDSTATUS=0
  else
    RAIDSTATUS=1
  fi
elif [[ -n $DUDEYOUGOTADELL ]]; then
  if [[ $BOSSRAID == '0' && $HWRAID == '0' ]]; then
    RAIDSTATUS=0
  else
    RAIDSTATUS=1
  fi
elif [[ "$APPTYPE" == 'sm' ]]; then
  if [[ -n "$HWRAID" ]]; then
    RAIDSTATUS=0
  else
    RAIDSTATUS=1
  fi
fi

echo "nsmraid=$RAIDSTATUS" > /opt/so/log/raid/status.log



@@ -17,4 +17,4 @@

. /usr/sbin/so-common

docker exec -it so-redis redis-cli llen logstash:unparsed
docker exec so-redis redis-cli llen logstash:unparsed

@@ -405,7 +405,7 @@ def main():
    enabled_list.set_defaults(func=list_enabled_rules)


    search_term_help='A quoted regex search term (ex: "\$EXTERNAL_NET")'
    search_term_help='A properly escaped regex search term (ex: "\\\$EXTERNAL_NET")'
    replace_term_help='The text to replace the search term with'

    # Modify actions

@@ -1,13 +1,10 @@
#!/bin/bash
got_root() {

  # Make sure you are root
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
. /usr/sbin/so-common

}
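# Re-quote each forwarded argument so it survives the shell that runs inside the
# container; the assembled string is then passed to idstools-rulecat verbatim.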
argstr=""
for arg in "$@"; do
  argstr="${argstr} \"${arg}\""
done

got_root
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat $1"
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat --force ${argstr}"

@@ -92,6 +92,10 @@ if [ $CURRENT_TIME -ge $((SYSTEM_START_TIME+$UPTIME_REQ)) ]; then
  log "last highstate completed at `date -d @$LAST_HIGHSTATE_END`" I
  log "checking if any jobs are running" I
  logCmd "salt-call --local saltutil.running" I
  log "ensure salt.minion-state-apply-test is enabled" I
  logCmd "salt-call state.enable salt.minion-state-apply-test" I
  log "ensure highstate is enabled" I
  logCmd "salt-call state.enable highstate" I
  log "killing all salt-minion processes" I
  logCmd "pkill -9 -ef /usr/bin/salt-minion" I
  log "starting salt-minion service" I

@@ -31,7 +31,7 @@ if [[ $# -lt 1 ]]; then
  echo "Usage: $0 <pcap-sample(s)>"
  echo
  echo "All PCAPs must be placed in the /opt/so/samples directory unless replaying"
  echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP sampes"
  echo "a sample pcap that is included in the so-tcpreplay image. Those PCAP samples"
  echo "are located in the /opt/samples directory inside of the image."
  echo
  echo "Customer provided PCAP example:"

@@ -41,10 +41,7 @@ if [[ $? == 0 ]]; then
fi
read -rs THEHIVE_PASS

if ! check_password "$THEHIVE_PASS"; then
  echo "Password is invalid. Please exclude single quotes, double quotes and backslashes from the password."
  exit 2
fi
check_password_and_exit "$THEHIVE_PASS"

# Create new user in TheHive
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")

57
salt/common/tools/sbin/so-thehive-user-update
Executable file
@@ -0,0 +1,57 @@
#!/bin/bash
#
# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

. /usr/sbin/so-common

usage() {
echo "Usage: $0 <user-name>"
echo ""
echo "Update password for an existing TheHive user. The new password will be read from STDIN."
exit 1
}

if [ $# -ne 1 ]; then
usage
fi

USER=$1

THEHIVE_KEY=$(lookup_pillar hivekey)
THEHVIE_API_URL="$(lookup_pillar url_base)/thehive/api"
THEHIVE_USER=$USER

# Read password for new user from stdin
test -t 0
if [[ $? == 0 ]]; then
echo "Enter new password:"
fi
read -rs THEHIVE_PASS

if ! check_password "$THEHIVE_PASS"; then
echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password."
exit 2
fi

# Change password for user in TheHive
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user/${THEHIVE_USER}/password/set" -d "{\"password\" : \"$THEHIVE_PASS\"}")
if [[ -z "$resp" ]]; then
echo "Successfully updated TheHive user password"
else
echo "Unable to update TheHive user password"
echo $resp
exit 2
fi
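Because the script prompts only when STDIN is a terminal (the test -t 0 check), the new password can also be piped in. An example invocation (user name and password are illustrative):

    echo 'NewS3curePassw0rd' | so-thehive-user-update analyst@example.com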

@@ -18,11 +18,17 @@

source $(dirname $0)/so-common

if [[ $# -lt 1 || $# -gt 2 ]]; then
echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
DEFAULT_ROLE=analyst

if [[ $# -lt 1 || $# -gt 3 ]]; then
echo "Usage: $0 <operation> [email] [role]"
echo ""
echo " where <operation> is one of the following:"
echo ""
echo " list: Lists all user email addresses currently defined in the identity system"
echo " add: Adds a new user to the identity system; requires 'email' parameter"
echo " add: Adds a new user to the identity system; requires 'email' parameter, while 'role' parameter is optional and defaults to $DEFAULT_ROLE"
echo " addrole: Grants a role to an existing user; requires 'email' and 'role' parameters"
echo " delrole: Removes a role from an existing user; requires 'email' and 'role' parameters"
echo " update: Updates a user's password; requires 'email' parameter"
echo " enable: Enables a user; requires 'email' parameter"
echo " disable: Disables a user; requires 'email' parameter"

@@ -36,14 +42,18 @@ fi

operation=$1
email=$2
role=$3

kratosUrl=${KRATOS_URL:-http://127.0.0.1:4434}
databasePath=${KRATOS_DB_PATH:-/opt/so/conf/kratos/db/db.sqlite}
bcryptRounds=${BCRYPT_ROUNDS:-12}
elasticUsersFile=${ELASTIC_USERS_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users}
elasticRolesFile=${ELASTIC_ROLES_FILE:-/opt/so/saltstack/local/salt/elasticsearch/files/users_roles}
socRolesFile=${SOC_ROLES_FILE:-/opt/so/conf/soc/soc_users_roles}
esUID=${ELASTIC_UID:-930}
esGID=${ELASTIC_GID:-930}
soUID=${SOCORE_UID:-939}
soGID=${SOCORE_GID:-939}

function lock() {
# Obtain file descriptor lock

@@ -80,7 +90,7 @@ function findIdByEmail() {

email=$1

response=$(curl -Ss -L ${kratosUrl}/identities)
identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
identityId=$(echo "${response}" | jq -r ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
echo $identityId
}

@@ -89,17 +99,23 @@ function validatePassword() {

len=$(expr length "$password")
if [[ $len -lt 6 ]]; then
echo "Password does not meet the minimum requirements"
exit 2
fail "Password does not meet the minimum requirements"
fi
if [[ $len -gt 72 ]]; then
fail "Password is too long (max: 72)"
fi
check_password_and_exit "$password"
}

function validateEmail() {
email=$1
# (?:[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*|"(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21\x23-\x5b\x5d-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])*")@(?:(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?|\[(?:(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9]))\.){3}(?:(2(5[0-5]|[0-4][0-9])|1[0-9][0-9]|[1-9]?[0-9])|[a-z0-9-]*[a-z0-9]:(?:[\x01-\x08\x0b\x0c\x0e-\x1f\x21-\x5a\x53-\x7f]|\\[\x01-\x09\x0b\x0c\x0e-\x7f])+)\])
if [[ ! "$email" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ ]]; then
echo "Email address is invalid"
exit 3
fail "Email address is invalid"
fi

if [[ "$email" =~ [A-Z] ]]; then
fail "Email addresses cannot contain uppercase letters"
fi
}
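The unwieldy RFC-style pattern is kept only as a comment; the active check is the simpler bash pattern plus an explicit lowercase rule. A quick illustration of how that pair of tests behaves (the addresses are made up):

    for addr in analyst@example.com Analyst@example.com plainaddress; do
        if [[ "$addr" =~ ^[[:alnum:]._%+-]+@[[:alnum:].-]+\.[[:alpha:]]{2,}$ && ! "$addr" =~ [A-Z] ]]; then
            echo "$addr: accepted"
        else
            echo "$addr: rejected"   # uppercase or malformed addresses fail
        fi
    done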

@@ -127,21 +143,51 @@ function updatePassword() {

validatePassword "$password"
fi

if [[ -n $identityId ]]; then
if [[ -n "$identityId" ]]; then
# Generate password hash
passwordHash=$(hashPassword "$password")
# Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB) where identity_id='${identityId}';" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password"
fi
}

function createElasticFile() {
function createFile() {
filename=$1
tmpFile=${filename}
truncate -s 0 "$tmpFile"
chmod 600 "$tmpFile"
chown "${esUID}:${esGID}" "$tmpFile"
uid=$2
gid=$3

mkdir -p $(dirname "$filename")
truncate -s 0 "$filename"
chmod 600 "$filename"
chown "${uid}:${gid}" "$filename"
}

function ensureRoleFileExists() {
if [[ ! -f "$socRolesFile" || ! -s "$socRolesFile" ]]; then
# Generate the new users file
rolesTmpFile="${socRolesFile}.tmp"
createFile "$rolesTmpFile" "$soUID" "$soGID"

if [[ -f "$databasePath" ]]; then
echo "Migrating roles to new file: $socRolesFile"

echo "select 'superuser:' || id from identities;" | sqlite3 "$databasePath" \
>> "$rolesTmpFile"
[[ $? != 0 ]] && fail "Unable to read identities from database"

echo "The following users have all been migrated with the super user role:"
cat "${rolesTmpFile}"
else
echo "Database file does not exist yet, installation is likely not yet complete."
fi

if [[ -d "$socRolesFile" ]]; then
echo "Removing invalid roles directory created by Docker"
rm -fr "$socRolesFile"
fi
mv "${rolesTmpFile}" "${socRolesFile}"
fi
}

function syncElasticSystemUser() {

@@ -172,53 +218,56 @@ function syncElasticSystemRole() {

}

function syncElastic() {
echo "Syncing users between SOC and Elastic..."
echo "Syncing users and roles between SOC and Elastic..."

usersTmpFile="${elasticUsersFile}.tmp"
createFile "${usersTmpFile}" "$esUID" "$esGID"
rolesTmpFile="${elasticRolesFile}.tmp"
createElasticFile "${usersTmpFile}"
createElasticFile "${rolesTmpFile}"
createFile "${rolesTmpFile}" "$esUID" "$esGID"

authPillarJson=$(lookup_salt_value "auth" "elasticsearch" "pillar" "json")

syncElasticSystemUser "$authPillarJson" "so_elastic_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"

syncElasticSystemUser "$authPillarJson" "so_kibana_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"

syncElasticSystemUser "$authPillarJson" "so_logstash_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"

syncElasticSystemUser "$authPillarJson" "so_beats_user" "$usersTmpFile"
syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"

syncElasticSystemUser "$authPillarJson" "so_monitor_user" "$usersTmpFile"

syncElasticSystemRole "$authPillarJson" "so_elastic_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_kibana_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_logstash_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_beats_user" "superuser" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_collector" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "remote_monitoring_agent" "$rolesTmpFile"
syncElasticSystemRole "$authPillarJson" "so_monitor_user" "monitoring_user" "$rolesTmpFile"

if [[ -f "$databasePath" ]]; then
# Generate the new users file
if [[ -f "$databasePath" && -f "$socRolesFile" ]]; then
# Append the SOC users
echo "select '{\"user\":\"' || ici.identifier || '\", \"data\":' || ic.config || '}'" \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \
"from identity_credential_identifiers ici, identity_credentials ic, identities i " \
"where " \
" ici.identity_credential_id=ic.id " \
" and ic.identity_id=i.id " \
" and instr(ic.config, 'hashed_password') " \
" and i.state == 'active' " \
"order by ici.identifier;" | \
sqlite3 "$databasePath" | \
jq -r '.user + ":" + .data.hashed_password' \
>> "$usersTmpFile"
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"

# Generate the new users_roles file

echo "select 'superuser:' || ici.identifier " \
# Append the user roles
while IFS="" read -r rolePair || [ -n "$rolePair" ]; do
userId=$(echo "$rolePair" | cut -d: -f2)
role=$(echo "$rolePair" | cut -d: -f1)
echo "select '$role:' || ici.identifier " \
"from identity_credential_identifiers ici, identity_credentials ic " \
"where ici.identity_credential_id=ic.id and instr(ic.config, 'hashed_password') " \
"order by ici.identifier;" | \
sqlite3 "$databasePath" \
>> "$rolesTmpFile"
[[ $? != 0 ]] && fail "Unable to read credential IDs from database"
"where ici.identity_credential_id=ic.id and ic.identity_id = '$userId';" | \
sqlite3 "$databasePath" >> "$rolesTmpFile"
done < "$socRolesFile"

else
echo "Database file does not exist yet, skipping users export"
echo "Database file or soc roles file does not exist yet, skipping users export"
fi

if [[ -s "${usersTmpFile}" ]]; then

@@ -236,15 +285,22 @@ function syncElastic() {

}

function syncAll() {
ensureRoleFileExists

# Check if a sync is needed. Sync is not needed if the following are true:
# - user database entries are all older than the elastic users file
# - soc roles file last modify date is older than the elastic roles file
if [[ -z "$FORCE_SYNC" && -f "$databasePath" && -f "$elasticUsersFile" ]]; then
usersFileAgeSecs=$(echo $(($(date +%s) - $(date +%s -r "$elasticUsersFile"))))
staleCount=$(echo "select count(*) from identity_credentials where updated_at >= Datetime('now', '-${usersFileAgeSecs} seconds');" \
| sqlite3 "$databasePath")
if [[ "$staleCount" == "0" ]]; then
if [[ "$staleCount" == "0" && "$elasticRolesFile" -nt "$socRolesFile" ]]; then
return 1
fi
fi

syncElastic

return 0
}
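The skip logic above combines a SQLite freshness query with bash's -nt (newer-than) file test, so a full sync only runs when the Kratos database or the roles file has changed since the Elastic files were last written. The -nt operator compares file modification times; a standalone sketch (paths illustrative):

    # True when file 'a' was modified more recently than file 'b'.
    if [[ /tmp/a -nt /tmp/b ]]; then
        echo "/tmp/a is newer; a sync would be needed"
    fi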

@@ -252,11 +308,64 @@ function listUsers() {

response=$(curl -Ss -L ${kratosUrl}/identities)
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"

echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
users=$(echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort)
for user in $users; do
roles=$(grep "$user" "$elasticRolesFile" | cut -d: -f1 | tr '\n' ' ')
echo "$user: $roles"
done
}

function addUserRole() {
email=$1
role=$2

adjustUserRole "$email" "$role" "add"
}

function deleteUserRole() {
email=$1
role=$2

adjustUserRole "$email" "$role" "del"
}

function adjustUserRole() {
email=$1
role=$2
op=$3

identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"

ensureRoleFileExists

filename="$socRolesFile"
hasRole=0
grep "$role:" "$socRolesFile" | grep -q "$identityId" && hasRole=1
if [[ "$op" == "add" ]]; then
if [[ "$hasRole" == "1" ]]; then
echo "User '$email' already has the role: $role"
return 1
else
echo "$role:$identityId" >> "$filename"
fi
elif [[ "$op" == "del" ]]; then
if [[ "$hasRole" -ne 1 ]]; then
fail "User '$email' does not have the role: $role"
else
sed "/^$role:$identityId\$/d" "$filename" > "$filename.tmp"
cat "$filename".tmp > "$filename"
rm -f "$filename".tmp
fi
else
fail "Unsupported role adjustment operation: $op"
fi
return 0
}

function createUser() {
email=$1
role=$2

now=$(date -u +%FT%TZ)
addUserJson=$(cat <<EOF

@@ -270,16 +379,30 @@ EOF

response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"

identityId=$(echo "${response}" | jq ".id")
if [[ ${identityId} == "null" ]]; then
identityId=$(echo "${response}" | jq -r ".id")
if [[ "${identityId}" == "null" ]]; then
code=$(echo "${response}" | jq ".error.code")
[[ "${code}" == "409" ]] && fail "User already exists"

reason=$(echo "${response}" | jq ".error.message")
[[ $? == 0 ]] && fail "Unable to add user: ${reason}"
else
updatePassword "$identityId"
addUserRole "$email" "$role"
fi
}

updatePassword $identityId
function migrateLockedUsers() {
# This is a migration function to convert locked users from prior to 2.3.90
# to inactive users using the newer Kratos functionality. This should only
# find locked users once.
lockedEmails=$(curl -s http://localhost:4434/identities | jq -r '.[] | select(.traits.status == "locked") | .traits.email')
if [[ -n "$lockedEmails" ]]; then
echo "Disabling locked users..."
for email in $lockedEmails; do
updateStatus "$email" locked
done
fi
}

function updateStatus() {

@@ -292,24 +415,18 @@ function updateStatus() {

response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"

oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
schemaId=$(echo "$response" | jq -r .schema_id)

# Capture traits and remove obsolete 'status' trait if exists
traitBlock=$(echo "$response" | jq -c .traits | sed -re 's/,?"status":".*?"//')

state="active"
if [[ "$status" == "locked" ]]; then
config=$(echo $oldConfig | sed -e 's/hashed/locked/')
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to lock credential record"

echo "delete from sessions where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to invalidate sessions"
else
config=$(echo $oldConfig | sed -e 's/locked/hashed/')
echo "update identity_credentials set config=CAST('${config}' as BLOB) where identity_id=${identityId};" | sqlite3 "$databasePath"
[[ $? != 0 ]] && fail "Unable to unlock credential record"
state="inactive"
fi

updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson")
[[ $? != 0 ]] && fail "Unable to mark user as locked"

body="{ \"schema_id\": \"$schemaId\", \"state\": \"$state\", \"traits\": $traitBlock }"
response=$(curl -fSsL -XPUT "${kratosUrl}/identities/$identityId" -d "$body")
[[ $? != 0 ]] && fail "Unable to update user"
}

function updateUser() {

@@ -318,7 +435,7 @@ function updateUser() {

identityId=$(findIdByEmail "$email")
[[ ${identityId} == "" ]] && fail "User not found"

updatePassword $identityId
updatePassword "$identityId"
}

function deleteUser() {

@@ -329,6 +446,11 @@ function deleteUser() {

response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId")
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"

rolesTmpFile="${socRolesFile}.tmp"
createFile "$rolesTmpFile" "$soUID" "$soGID"
grep -v "$identityId" "$socRolesFile" > "$rolesTmpFile"
mv "$rolesTmpFile" "$socRolesFile"
}

case "${operation}" in

@@ -339,7 +461,7 @@ case "${operation}" in

lock
validateEmail "$email"
updatePassword
createUser "$email"
createUser "$email" "${role:-$DEFAULT_ROLE}"
syncAll
echo "Successfully added new user to SOC"
check_container thehive && echo "$password" | so-thehive-user-add "$email"

@@ -351,6 +473,31 @@ case "${operation}" in

listUsers
;;

"addrole")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
[[ "$role" == "" ]] && fail "Role must be provided"

lock
validateEmail "$email"
if addUserRole "$email" "$role"; then
syncElastic
echo "Successfully added role to user"
fi
;;

"delrole")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"
[[ "$role" == "" ]] && fail "Role must be provided"

lock
validateEmail "$email"
deleteUserRole "$email" "$role"
syncElastic
echo "Successfully removed role from user"
;;
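Typical calls for the two new role operations might look like this (email and role values are illustrative):

    so-user addrole analyst@example.com superuser
    so-user delrole analyst@example.com superuser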

"update")
verifyEnvironment
[[ "$email" == "" ]] && fail "Email address must be provided"

@@ -370,7 +517,7 @@ case "${operation}" in

syncAll
echo "Successfully enabled user"
check_container thehive && so-thehive-user-enable "$email" true
check_container fleet && so-fleet-user-enable "$email" true
echo "Fleet user will need to be recreated manually with so-fleet-user-add"
;;

"disable")

@@ -382,7 +529,7 @@ case "${operation}" in

syncAll
echo "Successfully disabled user"
check_container thehive && so-thehive-user-enable "$email" false
check_container fleet && so-fleet-user-enable "$email" false
check_container fleet && so-fleet-user-delete "$email"
;;

"delete")

@@ -394,7 +541,7 @@ case "${operation}" in

syncAll
echo "Successfully deleted user"
check_container thehive && so-thehive-user-enable "$email" false
check_container fleet && so-fleet-user-enable "$email" false
check_container fleet && so-fleet-user-delete "$email"
;;

"sync")

@@ -418,6 +565,11 @@ case "${operation}" in

echo "Password is acceptable"
;;

"migrate")
migrateLockedUsers
echo "User migration complete"
;;

*)
fail "Unsupported operation: $operation"
;;

@@ -1,5 +1,4 @@

#!/bin/bash

# Copyright 2014,2015,2016,2017,2018,2019,2020,2021 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify

@@ -20,13 +19,8 @@ echo "Starting to check for yara rule updates at $(date)..."

output_dir="/opt/so/saltstack/default/salt/strelka/rules"
mkdir -p $output_dir

repos="$output_dir/repos.txt"
ignorefile="$output_dir/ignore.txt"

deletecounter=0
newcounter=0
updatecounter=0

{% if ISAIRGAP is sameas true %}

@@ -35,58 +29,21 @@ echo "Airgap mode enabled."

clone_dir="/nsm/repo/rules/strelka"
repo_name="signature-base"
mkdir -p /opt/so/saltstack/default/salt/strelka/rules/signature-base

# Ensure a copy of the license is available for the rules
[ -f $clone_dir/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name

# Copy over rules
for i in $(find $clone_dir/yara -name "*.yar*"); do
rule_name=$(echo $i | awk -F '/' '{print $NF}')
repo_sum=$(sha256sum $i | awk '{print $1}')

# Check rules against those in ignore list -- don't copy if ignored.
if ! grep -iq $rule_name $ignorefile; then
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)

# For existing rules, check to see if they need to be updated, by comparing checksums
if [ $existing_rules -gt 0 ];then
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
if [ "$repo_sum" != "$local_sum" ]; then
echo "Checksums do not match!"
echo "Updating $rule_name..."
cp $i $output_dir/$repo_name;
((updatecounter++))
fi
else
# If rule doesn't exist already, we'll add it
echo "Adding new rule: $rule_name..."
echo "Adding rule: $rule_name..."
cp $i $output_dir/$repo_name
((newcounter++))
fi
fi;
done

# Check to see if we have any old rules that need to be removed
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
is_repo_rule=$(find $clone_dir -name "$i" | wc -l)
if [ $is_repo_rule -eq 0 ]; then
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
rm $output_dir/$repo_name/$i
((deletecounter++))
fi
done

echo "Done!"

if [ "$newcounter" -gt 0 ];then
echo "$newcounter new rules added."
fi

if [ "$updatecounter" -gt 0 ];then
echo "$updatecounter rules updated."
fi

if [ "$deletecounter" -gt 0 ];then
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
echo "$newcounter rules added."
fi

{% else %}

@@ -99,50 +56,21 @@ if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then

if ! $(echo "$repo" | grep -qE '^#'); then
# Remove old repo if existing bc of previous error condition or unexpected disruption
repo_name=`echo $repo | awk -F '/' '{print $NF}'`
[ -d $repo_name ] && rm -rf $repo_name
[ -d $output_dir/$repo_name ] && rm -rf $output_dir/$repo_name

# Clone repo and make appropriate directories for rules

git clone $repo $clone_dir/$repo_name
echo "Analyzing rules from $clone_dir/$repo_name..."
mkdir -p $output_dir/$repo_name
# Ensure a copy of the license is available for the rules
[ -f $clone_dir/$repo_name/LICENSE ] && cp $clone_dir/$repo_name/LICENSE $output_dir/$repo_name

# Copy over rules
for i in $(find $clone_dir/$repo_name -name "*.yar*"); do
rule_name=$(echo $i | awk -F '/' '{print $NF}')
repo_sum=$(sha256sum $i | awk '{print $1}')

# Check rules against those in ignore list -- don't copy if ignored.
if ! grep -iq $rule_name $ignorefile; then
existing_rules=$(find $output_dir/$repo_name/ -name $rule_name | wc -l)

# For existing rules, check to see if they need to be updated, by comparing checksums
if [ $existing_rules -gt 0 ];then
local_sum=$(sha256sum $output_dir/$repo_name/$rule_name | awk '{print $1}')
if [ "$repo_sum" != "$local_sum" ]; then
echo "Checksums do not match!"
echo "Updating $rule_name..."
cp $i $output_dir/$repo_name;
((updatecounter++))
fi
else
# If rule doesn't exist already, we'll add it
echo "Adding new rule: $rule_name..."
echo "Adding rule: $rule_name..."
cp $i $output_dir/$repo_name
((newcounter++))
fi
fi;
done

# Check to see if we have any old rules that need to be removed
for i in $(find $output_dir/$repo_name -name "*.yar*" | awk -F '/' '{print $NF}'); do
is_repo_rule=$(find $clone_dir/$repo_name -name "$i" | wc -l)
if [ $is_repo_rule -eq 0 ]; then
echo "Could not find $i in source $repo_name repo...removing from $output_dir/$repo_name..."
rm $output_dir/$repo_name/$i
((deletecounter++))
fi
done
rm -rf $clone_dir/$repo_name
fi

@@ -151,15 +79,7 @@ if [ "$gh_status" == "200" ] || [ "$gh_status" == "301" ]; then

echo "Done!"

if [ "$newcounter" -gt 0 ];then
echo "$newcounter new rules added."
fi

if [ "$updatecounter" -gt 0 ];then
echo "$updatecounter rules updated."
fi

if [ "$deletecounter" -gt 0 ];then
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
echo "$newcounter rules added."
fi

else

@@ -27,6 +27,7 @@ SOUP_LOG=/root/soup.log

INFLUXDB_MIGRATION_LOG=/opt/so/log/influxdb/soup_migration.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false

check_err() {
local exit_code=$1

@@ -105,9 +106,11 @@ add_common() {

airgap_mounted() {
# Let's see if the ISO is already mounted.
if [ -f /tmp/soagupdate/SecurityOnion/VERSION ]; then
if [[ -f /tmp/soagupdate/SecurityOnion/VERSION ]]; then
echo "The ISO is already mounted"
else
if [[ -z $ISOLOC ]]; then
echo "This is airgap. Ask for a location."
echo ""
cat << EOF
In order for soup to proceed, the path to the downloaded Security Onion ISO file, or the path to the CD-ROM or equivalent device containing the ISO media must be provided.

@@ -116,6 +119,7 @@ Or, if you have burned the new ISO onto an optical disk then the path might look

EOF
read -rp 'Enter the path to the new Security Onion ISO content: ' ISOLOC
fi
if [[ -f $ISOLOC ]]; then
# Mounting the ISO image
mkdir -p /tmp/soagupdate

@@ -131,7 +135,7 @@ EOF

elif [[ -f $ISOLOC/SecurityOnion/VERSION ]]; then
ln -s $ISOLOC /tmp/soagupdate
echo "Found the update content"
else
elif [[ -b $ISOLOC ]]; then
mkdir -p /tmp/soagupdate
mount $ISOLOC /tmp/soagupdate
if [ ! -f /tmp/soagupdate/SecurityOnion/VERSION ]; then

@@ -141,6 +145,10 @@ EOF

else
echo "Device has been mounted!"
fi
else
echo "Could not find Security Onion ISO content at ${ISOLOC}"
echo "Ensure the path you entered is correct, and that you verify the ISO that you downloaded."
exit 0
fi
fi
}

@@ -150,7 +158,7 @@ airgap_update_dockers() {

# Let's copy the tarball
if [[ ! -f $AGDOCKER/registry.tar ]]; then
echo "Unable to locate registry. Exiting"
exit 1
exit 0
else
echo "Stopping the registry docker"
docker stop so-dockerregistry

@@ -182,6 +190,50 @@ check_airgap() {

fi
}

# {% raw %}

check_local_mods() {
local salt_local=/opt/so/saltstack/local

local_mod_arr=()

while IFS= read -r -d '' local_file; do
stripped_path=${local_file#"$salt_local"}
default_file="${DEFAULT_SALT_DIR}${stripped_path}"
if [[ -f $default_file ]]; then
file_diff=$(diff "$default_file" "$local_file" )
if [[ $(echo "$file_diff" | grep -c "^<") -gt 0 ]]; then
local_mod_arr+=( "$local_file" )
fi
fi
done< <(find $salt_local -type f -print0)

if [[ ${#local_mod_arr} -gt 0 ]]; then
echo "Potentially breaking changes found in the following files (check ${DEFAULT_SALT_DIR} for original copy):"
for file_str in "${local_mod_arr[@]}"; do
echo " $file_str"
done
echo ""
echo "To reference this list later, check $SOUP_LOG"
sleep 10
fi
}

# {% endraw %}

check_pillar_items() {
local pillar_output=$(salt-call pillar.items --out=json)

cond=$(jq '.local | has("_errors")' <<< "$pillar_output")
if [[ "$cond" == "true" ]]; then
printf "\nThere is an issue rendering the manager's pillars. Please correct the issues in the sls files mentioned below before running SOUP again.\n\n"
jq '.local._errors[]' <<< "$pillar_output"
exit 0
else
printf "\nThe manager's pillars can be rendered. We can proceed with SOUP.\n\n"
fi
}

check_sudoers() {
if grep -q "so-setup" /etc/sudoers; then
echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."

@@ -251,25 +303,31 @@ check_os_updates() {

OSUPDATES=$(yum -q list updates | wc -l)
fi
if [[ "$OSUPDATES" -gt 0 ]]; then
echo $NEEDUPDATES
if [[ -z $UNATTENDED ]]; then
echo "$NEEDUPDATES"
echo ""
read -p "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm

read -rp "Press U to update OS packages (recommended), C to continue without updates, or E to exit: " confirm
if [[ "$confirm" == [cC] ]]; then
echo "Continuing without updating packages"
elif [[ "$confirm" == [uU] ]]; then
echo "Applying Grid Updates"
set +e
run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
set -e
update_flag=true
else
echo "Exiting soup"
exit 0
fi
else
update_flag=true
fi
else
echo "Looks like you have an updated OS"
fi

if [[ $update_flag == true ]]; then
set +e
run_check_net_err "salt '*' -b 5 state.apply patch.os queue=True" 'Could not apply OS updates, please check your network connection.'
set -e
fi
}

clean_dockers() {

@@ -335,12 +393,11 @@ preupgrade_changes() {

# This function is to add any new pillar items if needed.
echo "Checking to see if changes are needed."

[[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
[[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3
[[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_2.3.0_to_2.3.20
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_2.3.2X_to_2.3.30
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_2.3.3X_to_2.3.50
[[ "$INSTALLEDVERSION" == 2.3.0 || "$INSTALLEDVERSION" == 2.3.1 || "$INSTALLEDVERSION" == 2.3.2 || "$INSTALLEDVERSION" == 2.3.10 ]] && up_to_2.3.20
[[ "$INSTALLEDVERSION" == 2.3.20 || "$INSTALLEDVERSION" == 2.3.21 ]] && up_to_2.3.30
[[ "$INSTALLEDVERSION" == 2.3.30 || "$INSTALLEDVERSION" == 2.3.40 ]] && up_to_2.3.50
[[ "$INSTALLEDVERSION" == 2.3.50 || "$INSTALLEDVERSION" == 2.3.51 || "$INSTALLEDVERSION" == 2.3.52 || "$INSTALLEDVERSION" == 2.3.60 || "$INSTALLEDVERSION" == 2.3.61 || "$INSTALLEDVERSION" == 2.3.70 ]] && up_to_2.3.80
[[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90
true
}

@@ -348,119 +405,66 @@ postupgrade_changes() {

# This function is to add any new pillar items if needed.
echo "Running post upgrade processes."

[[ "$POSTVERSION" =~ rc.1 ]] && post_rc1_to_rc2
[[ "$POSTVERSION" == 2.3.20 || "$POSTVERSION" == 2.3.21 ]] && post_2.3.2X_to_2.3.30
[[ "$POSTVERSION" == 2.3.30 ]] && post_2.3.30_to_2.3.40
[[ "$POSTVERSION" == 2.3.50 ]] && post_2.3.5X_to_2.3.60
[[ "$POSTVERSION" == 2.3.0 || "$POSTVERSION" == 2.3.1 || "$POSTVERSION" == 2.3.2 || "$POSTVERSION" == 2.3.10 || "$POSTVERSION" == 2.3.20 ]] && post_to_2.3.21
[[ "$POSTVERSION" == 2.3.21 || "$POSTVERSION" == 2.3.30 ]] && post_to_2.3.40
[[ "$POSTVERSION" == 2.3.40 || "$POSTVERSION" == 2.3.50 || "$POSTVERSION" == 2.3.51 || "$POSTVERSION" == 2.3.52 ]] && post_to_2.3.60
[[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90
true
}

post_rc1_to_2.3.21() {
post_to_2.3.21() {
salt-call state.apply playbook.OLD_db_init
rm -f /opt/so/rules/elastalert/playbook/*.yaml
so-playbook-ruleupdate >> /root/soup_playbook_rule_update.log 2>&1 &
POSTVERSION=2.3.21
}

post_2.3.2X_to_2.3.30() {
so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
POSTVERSION=2.3.30
}

post_2.3.30_to_2.3.40() {
post_to_2.3.40() {
so-playbook-sigma-refresh >> /root/soup_playbook_sigma_refresh.log 2>&1 &
so-kibana-space-defaults
POSTVERSION=2.3.40
}

post_2.3.5X_to_2.3.60() {
post_to_2.3.60() {
for table in identity_recovery_addresses selfservice_recovery_flows selfservice_registration_flows selfservice_verification_flows identities identity_verification_tokens identity_credentials selfservice_settings_flows identity_recovery_tokens continuity_containers identity_credential_identifiers identity_verifiable_addresses courier_messages selfservice_errors sessions selfservice_login_flows
do
echo "Forcing Kratos network migration: $table"
sqlite3 /opt/so/conf/kratos/db/db.sqlite "update $table set nid=(select id from networks limit 1);"
done

POSTVERSION=2.3.60
}

post_to_2.3.90() {
# Do Kibana dashboard things
salt-call state.apply kibana.so_savedobjects_defaults queue=True

rc1_to_rc2() {
# Create FleetDM service account
FLEET_MANAGER=$(lookup_pillar fleet_manager)
if [[ "$FLEET_MANAGER" == "True" ]]; then
FLEET_SA_EMAIL=$(lookup_pillar_secret fleet_sa_email)
FLEET_SA_PW=$(lookup_pillar_secret fleet_sa_password)
MYSQL_PW=$(lookup_pillar_secret mysql)

# Move the static file to global.sls
echo "Migrating static.sls to global.sls"
mv -v /opt/so/saltstack/local/pillar/static.sls /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
sed -i '1c\global:' /opt/so/saltstack/local/pillar/global.sls >> "$SOUP_LOG" 2>&1
FLEET_HASH=$(docker exec so-soctopus python -c "import bcrypt; print(bcrypt.hashpw('$FLEET_SA_PW'.encode('utf-8'), bcrypt.gensalt()).decode('utf-8'));" 2>&1)
MYSQL_OUTPUT=$(docker exec so-mysql mysql -u root --password=$MYSQL_PW fleet -e \
"INSERT INTO users (password,salt,email,name,global_role) VALUES ('$FLEET_HASH','','$FLEET_SA_EMAIL','$FLEET_SA_EMAIL','admin')" 2>&1)

# Moving baseurl from minion sls file to inside global.sls
local line=$(grep '^ url_base:' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls)
sed -i '/^ url_base:/d' /opt/so/saltstack/local/pillar/minions/$MINIONID.sls;
sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls;

# Adding play values to the global.sls
local HIVEPLAYSECRET=$(get_random_value)
local CORTEXPLAYSECRET=$(get_random_value)
sed -i "/^global:/a \\ hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
sed -i "/^global:/a \\ cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;

# Move storage nodes to hostname for SSL
# Get a list we can use:
grep -A1 searchnode /opt/so/saltstack/local/pillar/data/nodestab.sls | grep -v '\-\-' | sed '$!N;s/\n/ /' | awk '{print $1,$3}' | awk '/_searchnode:/{gsub(/\_searchnode:/, "_searchnode"); print}' >/tmp/nodes.txt
# Remove the nodes from cluster settings
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Removing the old cross cluster config for $NAME"
curl -XPUT -H 'Content-Type: application/json' http://localhost:9200/_cluster/settings -d '{"persistent":{"cluster":{"remote":{"'$NAME'":{"skip_unavailable":null,"seeds":null}}}}}'
done </tmp/nodes.txt
# Add the nodes back using hostname
while read p; do
local NAME=$(echo $p | awk '{print $1}')
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
local IP=$(echo $p | awk '{print $2}')
echo "Adding the new cross cluster config for $NAME"
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
done </tmp/nodes.txt

INSTALLEDVERSION=rc.2
}

rc2_to_rc3() {

# move location of local.rules
cp /opt/so/saltstack/default/salt/idstools/localrules/local.rules /opt/so/saltstack/local/salt/idstools/local.rules

if [ -f /opt/so/saltstack/local/salt/idstools/localrules/local.rules ]; then
cat /opt/so/saltstack/local/salt/idstools/localrules/local.rules >> /opt/so/saltstack/local/salt/idstools/local.rules
if [[ $? -eq 0 ]]; then
echo "Successfully added service account to Fleet"
else
echo "Unable to add service account to Fleet"
echo "$MYSQL_OUTPUT"
fi
rm -rf /opt/so/saltstack/local/salt/idstools/localrules
rm -rf /opt/so/saltstack/default/salt/idstools/localrules

# Rename mdengine to MDENGINE
sed -i "s/ zeekversion/ mdengine/g" /opt/so/saltstack/local/pillar/global.sls
# Enable Strelka Rules
sed -i "/ rules:/c\ rules: 1" /opt/so/saltstack/local/pillar/global.sls

INSTALLEDVERSION=rc.3

}

rc3_to_2.3.0() {
# Fix Tab Complete
if [ ! -f /etc/profile.d/securityonion.sh ]; then
echo "complete -cf sudo" > /etc/profile.d/securityonion.sh
fi

{
echo "redis_settings:"
echo " redis_maxmemory: 827"
echo "playbook:"
echo " api_key: de6639318502476f2fa5aa06f43f51fb389a3d7f"
} >> /opt/so/saltstack/local/pillar/global.sls

sed -i 's/playbook:/playbook_db:/' /opt/so/saltstack/local/pillar/secrets.sls
{
echo "playbook_admin: $(get_random_value)"
echo "playbook_automation: $(get_random_value)"
} >> /opt/so/saltstack/local/pillar/secrets.sls

INSTALLEDVERSION=2.3.0
POSTVERSION=2.3.90
}

up_2.3.0_to_2.3.20(){

up_to_2.3.20(){
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
# Remove PCAP from global
sed '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls

@@ -498,7 +502,7 @@ up_2.3.0_to_2.3.20(){

INSTALLEDVERSION=2.3.20
}

up_2.3.2X_to_2.3.30() {
up_to_2.3.30() {
# Replace any curly brace scalars with the same scalar in single quotes
readarray -t minion_pillars <<< "$(find /opt/so/saltstack/local/pillar/minions -type f -name '*.sls')"
for pillar in "${minion_pillars[@]}"; do

@@ -521,32 +525,7 @@ up_2.3.2X_to_2.3.30() {

INSTALLEDVERSION=2.3.30
}

upgrade_to_2.3.50_repo() {
echo "Performing repo changes."
if [[ "$OS" == "centos" ]]; then
# Import GPG Keys
gpg_rpm_import
echo "Disabling fastestmirror."
disable_fastestmirror
echo "Deleting unneeded repo files."
DELREPOS=('CentOS-Base' 'CentOS-CR' 'CentOS-Debuginfo' 'docker-ce' 'CentOS-fasttrack' 'CentOS-Media' 'CentOS-Sources' 'CentOS-Vault' 'CentOS-x86_64-kernel' 'epel' 'epel-testing' 'saltstack' 'wazuh')

for DELREPO in "${DELREPOS[@]}"; do
if [[ -f "/etc/yum.repos.d/$DELREPO.repo" ]]; then
echo "Deleting $DELREPO.repo"
rm -f "/etc/yum.repos.d/$DELREPO.repo"
fi
done
if [[ $is_airgap -eq 1 ]]; then
# Copy the new repo file if not airgap
cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
yum clean all
yum repolist
fi
fi
}

up_2.3.3X_to_2.3.50() {
up_to_2.3.50() {

cat <<EOF > /tmp/supersed.txt
/so-zeek:/ {

@@ -578,6 +557,91 @@ EOF

INSTALLEDVERSION=2.3.50
}

up_to_2.3.80() {

# Remove watermark settings from global.sls
sed -i '/ cluster_routing_allocation_disk/d' /opt/so/saltstack/local/pillar/global.sls

# Add new indices to the global
sed -i '/ index_settings:/a \\ so-elasticsearch: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-logstash: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-kibana: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ index_settings:/a \\ so-redis: \n shards: 1 \n warm: 7 \n close: 30 \n delete: 365' /opt/so/saltstack/local/pillar/global.sls

# Do some pillar formatting
tc=$(grep -w true_cluster /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print tolower($2)'}| xargs)

if [[ "$tc" == "true" ]]; then
tcname=$(grep -w true_cluster_name /opt/so/saltstack/local/pillar/global.sls | awk -F: {'print $2'})
sed -i "/^elasticsearch:/a \\ config: \n cluster: \n name: $tcname" /opt/so/saltstack/local/pillar/global.sls
sed -i '/ true_cluster_name/d' /opt/so/saltstack/local/pillar/global.sls
sed -i '/ esclustername/d' /opt/so/saltstack/local/pillar/global.sls

for file in /opt/so/saltstack/local/pillar/minions/*.sls; do
if [[ ${file} != *"manager.sls"* ]]; then
noderoutetype=$(grep -w node_route_type $file | awk -F: {'print $2'})
if [ -n "$noderoutetype" ]; then
sed -i "/^elasticsearch:/a \\ config: \n node: \n attr: \n box_type: $noderoutetype" $file
sed -i '/ node_route_type/d' $file
noderoutetype=''
fi
fi
done
fi

# check for local es config to inform user that the config in local is now ignored and those options need to be placed in the pillar
if [ -f "/opt/so/saltstack/local/salt/elasticsearch/files/elasticsearch.yml" ]; then
NOTIFYCUSTOMELASTICCONFIG=true
fi

INSTALLEDVERSION=2.3.80
}

up_to_2.3.90() {
for i in manager managersearch eval standalone; do
if compgen -G "/opt/so/saltstack/local/pillar/minions/*_$i.sls" > /dev/null; then
echo "soc:" >> /opt/so/saltstack/local/pillar/minions/*_$i.sls
sed -i "/^soc:/a \\ es_index_patterns: '*:so-*,*:endgame-*'" /opt/so/saltstack/local/pillar/minions/*_$i.sls
fi
done

# Create Endgame Hostgroup
so-firewall addhostgroup endgame

# Force influx to generate a new cert
mv /etc/pki/influxdb.crt /etc/pki/influxdb.crt.2390upgrade
mv /etc/pki/influxdb.key /etc/pki/influxdb.key.2390upgrade

# remove old common ingest pipeline in default
rm -vf /opt/so/saltstack/default/salt/elasticsearch/files/ingest/common
# if custom common, move from local ingest to local ingest-dynamic
mkdir -vp /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic
if [[ -f "/opt/so/saltstack/local/salt/elasticsearch/files/ingest/common" ]]; then
mv -v /opt/so/saltstack/local/salt/elasticsearch/files/ingest/common /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
# since json file, we need to wrap with raw
sed -i '1s/^/{{'{% raw %}'}}\n/' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
sed -i -e '$a{{'{% endraw %}'}}\n' /opt/so/saltstack/local/salt/elasticsearch/files/ingest-dynamic/common
fi

# Generate FleetDM Service Account creds if they do not exist
if grep -q "fleet_sa_email" /opt/so/saltstack/local/pillar/secrets.sls; then
echo "FleetDM Service Account credentials already created..."
else
echo "Generating FleetDM Service Account credentials..."
FLEETSAPASS=$(get_random_value)
printf '%s\n'\
" fleet_sa_email: service.account@securityonion.invalid"\
" fleet_sa_password: $FLEETSAPASS"\
>> /opt/so/saltstack/local/pillar/secrets.sls

fi

sed -i -re 's/^(playbook_admin.*|playbook_automation.*)/ \1/g' /opt/so/saltstack/local/pillar/secrets.sls

INSTALLEDVERSION=2.3.90
}
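The compgen -G call above is a glob-existence test: it expands the pattern and succeeds only when at least one path matches, which sidesteps the pitfalls of testing an unquoted wildcard with [ -f ... ]. A standalone sketch of the same pattern (the glob is illustrative):

    if compgen -G "/opt/so/saltstack/local/pillar/minions/*_manager.sls" > /dev/null; then
        echo "at least one manager minion pillar exists"
    fi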

verify_upgradespace() {
CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//')
if [ "$CURRENTSPACE" -lt "10" ]; then

@@ -593,7 +657,7 @@ upgrade_space() {

clean_dockers
if ! verify_upgradespace; then
echo "There is not enough space to perform the upgrade. Please free up space and try again"
exit 1
exit 0
fi
else
echo "You have enough space for upgrade. Proceeding with soup."

@@ -618,8 +682,8 @@ thehive_maint() {

done
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
echo "Migrating thehive databases if needed."
curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate"
curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate"
curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1
curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1
fi
}

@@ -719,17 +783,48 @@ upgrade_salt() {

fi
}

upgrade_to_2.3.50_repo() {
echo "Performing repo changes."
if [[ "$OS" == "centos" ]]; then
# Import GPG Keys
gpg_rpm_import
echo "Disabling fastestmirror."
disable_fastestmirror
echo "Deleting unneeded repo files."
DELREPOS=('CentOS-Base' 'CentOS-CR' 'CentOS-Debuginfo' 'docker-ce' 'CentOS-fasttrack' 'CentOS-Media' 'CentOS-Sources' 'CentOS-Vault' 'CentOS-x86_64-kernel' 'epel' 'epel-testing' 'saltstack' 'wazuh')

for DELREPO in "${DELREPOS[@]}"; do
if [[ -f "/etc/yum.repos.d/$DELREPO.repo" ]]; then
echo "Deleting $DELREPO.repo"
rm -f "/etc/yum.repos.d/$DELREPO.repo"
fi
done
if [[ $is_airgap -eq 1 ]]; then
# Copy the new repo file if not airgap
cp $UPDATE_DIR/salt/repo/client/files/centos/securityonion.repo /etc/yum.repos.d/
yum clean all
yum repolist
fi
fi
}

verify_latest_update_script() {
#we need to render soup and so-common first since they contain jinja
salt-call slsutil.renderer $UPDATE_DIR/salt/common/tools/sbin/soup default_renderer='jinja' --local --out=newline_values_only --out-indent=-4 --out-file=/tmp/soup
sed -i -e '$a\' /tmp/soup
salt-call slsutil.renderer $UPDATE_DIR/salt/common/tools/sbin/so-common default_renderer='jinja' --local --out=newline_values_only --out-indent=-4 --out-file=/tmp/so-common
sed -i -e '$a\' /tmp/so-common
# Check to see if the update scripts match. If not run the new one.
CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
CURRENTSOUP=$(md5sum /usr/sbin/soup | awk '{print $1}')
GITSOUP=$(md5sum /tmp/soup | awk '{print $1}')
CURRENTCMN=$(md5sum /usr/sbin/so-common | awk '{print $1}')
GITCMN=$(md5sum /tmp/so-common | awk '{print $1}')
CURRENTIMGCMN=$(md5sum /usr/sbin/so-image-common | awk '{print $1}')
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')

if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
echo "This version of the soup script is up to date. Proceeding."
rm -f /tmp/soup /tmp/so-common
else
echo "You are not running the latest soup version. Updating soup and its components. Might take multiple runs to complete"
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/

@@ -742,40 +837,43 @@ verify_latest_update_script() {

fi
}

apply_hotfix() {
if [[ "$INSTALLEDVERSION" == "2.3.90" && "$HOTFIXVERSION" == "WAZUH" ]] ; then
FILE="/nsm/wazuh/etc/ossec.conf"
echo "Detecting if ossec.conf needs corrected..."
if head -1 $FILE | grep -q "xml version"; then
echo "$FILE has an XML header; removing"
sed -i 1d $FILE
so-wazuh-restart
else
echo "$FILE does not have an XML header, so no changes are necessary."
fi
else
echo "Skipping ossec.conf check ($INSTALLEDVERSION/$HOTFIXVERSION)"
fi
}

main() {
set -e
set +e
trap 'check_err $?' EXIT

echo "### Preparing soup at $(date) ###"
while getopts ":b" opt; do
case "$opt" in
b ) # process option b
shift
BATCHSIZE=$1
if ! [[ "$BATCHSIZE" =~ ^[0-9]+$ ]]; then
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
\? )
echo "Usage: cmd [-b]"
;;
esac
done
check_pillar_items

echo "Checking to see if this is an airgap install."
echo ""
check_airgap
if [[ $is_airgap -eq 0 && $UNATTENDED == true && -z $ISOLOC ]]; then
echo "Missing file argument (-f <FILENAME>) for unattended airgap upgrade."
exit 0
fi
echo "Checking to see if this is a manager."
echo ""
require_manager
set_minionid
echo "Checking to see if this is an airgap install."
echo ""
check_airgap
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
echo ""
if [[ $is_airgap -eq 0 ]]; then
# Let's mount the ISO since this is airgap
echo "This is airgap. Ask for a location."
airgap_mounted
else
echo "Cloning Security Onion github repo into $UPDATE_DIR."

@@ -810,9 +908,10 @@ main() {

set -e

if [ "$is_hotfix" == "true" ]; then
echo "Applying $HOTFIXVERSION"
echo "Applying $HOTFIXVERSION hotfix"
copy_new_files
echo ""
apply_hotfix
echo "Hotfix applied"
update_version
salt-call state.highstate -l info queue=True
else

@@ -852,7 +951,6 @@ main() {

echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
fi

echo "Checking if Salt was upgraded."
echo ""

@@ -863,11 +961,12 @@ main() {

echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 1
exit 0
else
echo "Salt upgrade success."
echo ""
fi
fi

preupgrade_changes
echo ""

@@ -922,8 +1021,6 @@ main() {

set +e
salt-call state.highstate -l info queue=True
set -e
echo ""
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."

echo ""
echo "Stopping Salt Master to remove ACL"

@@ -946,6 +1043,13 @@ main() {

[[ $is_airgap -eq 0 ]] && unmount_update
thehive_maint

echo ""
echo "Upgrade to $NEWVERSION complete."

# Everything beyond this is post-upgrade checking, don't fail past this point if something here causes an error
set +e

echo "Checking the number of minions."
NUM_MINIONS=$(ls /opt/so/saltstack/local/pillar/minions/*_*.sls | wc -l)
if [[ $UPGRADESALT -eq 1 ]] && [[ $NUM_MINIONS -gt 1 ]]; then
if [[ $is_airgap -eq 0 ]]; then

@@ -956,8 +1060,15 @@ main() {

fi
fi

echo "Checking for local modifications."
check_local_mods

echo "Checking sudoers file."
check_sudoers

echo "Checking for necessary user migrations."
so-user migrate

if [[ -n $lsl_msg ]]; then
case $lsl_msg in
'distributed')

@@ -993,10 +1104,56 @@ EOF

fi
fi

if [ "$NOTIFYCUSTOMELASTICCONFIG" = true ] ; then

cat << EOF

A custom Elasticsearch configuration has been found at /opt/so/saltstack/local/elasticsearch/files/elasticsearch.yml. This file is no longer referenced in Security Onion versions >= 2.3.80.

If you still need those customizations, you'll need to manually migrate them to the new Elasticsearch config as shown at https://docs.securityonion.net/en/2.3/elasticsearch.html.

EOF

fi

echo "### soup has been served at $(date) ###"
}

cat << EOF
while getopts ":b:f:y" opt; do
case ${opt} in
b )
BATCHSIZE="$OPTARG"
if ! [[ "$BATCHSIZE" =~ ^[1-9][0-9]*$ ]]; then
echo "Batch size must be a number greater than 0."
exit 1
fi
;;
y )
if [[ ! -f /opt/so/state/yeselastic.txt ]]; then
echo "Cannot run soup in unattended mode. You must run soup manually to accept the Elastic License."
exit 1
else
UNATTENDED=true
fi
;;
f )
ISOLOC="$OPTARG"
;;
\? )
echo "Usage: soup [-b] [-y] [-f <iso location>]"
exit 1
;;
: )
echo "Invalid option: $OPTARG requires an argument"
exit 1
;;
esac
done
shift $((OPTIND - 1))
|
||||
|
||||
if [[ -z $UNATTENDED ]]; then
|
||||
cat << EOF
|
||||
|
||||
SOUP - Security Onion UPdater
|
||||
|
||||
@@ -1008,7 +1165,8 @@ Press Enter to continue or Ctrl-C to cancel.
|
||||
|
||||
EOF
|
||||
|
||||
read -r input
|
||||
read -r input
|
||||
fi
|
||||
|
||||
echo "### Preparing soup at $(date) ###"
|
||||
main "$@" | tee -a $SOUP_LOG
|
||||
|
||||
|
||||
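For reference, the option parsing above gives soup three flags: -b takes a numeric BATCHSIZE, -y requests unattended mode (allowed only once /opt/so/state/yeselastic.txt records that the Elastic license was accepted), and -f takes an ISO location for offline updates. A minimal usage sketch based solely on the getopts spec shown in this diff (the ISO path is a hypothetical example):

    soup                                   # interactive: print the banner, wait for Enter
    soup -y                                # unattended; refused until the Elastic license was accepted
    soup -b 5                              # set BATCHSIZE; must match ^[1-9][0-9]*$
    soup -f /tmp/securityonion-2.3.90.iso  # hypothetical path: update from a local ISO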
salt/curator/files/action/so-aws-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-aws:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close aws indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-aws.*|so-aws.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:
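Each of the new curator action files in this diff resolves its retention threshold from pillar at render time; the second argument to pillar.get is the fallback (30 days for the close actions). To see which value a given file will render with, you can query the same key the Jinja header uses. A sketch, assuming it runs on a node where salt-call and the elasticsearch pillar are available:

    # Prints the per-index override if one is set, otherwise nothing
    # (meaning the 30-day default in the Jinja header applies).
    salt-call pillar.get elasticsearch:index_settings:so-aws:close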
salt/curator/files/action/so-aws-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete aws indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-aws.*|so-aws.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:
salt/curator/files/action/so-aws-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-aws:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-aws
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
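The warm action uses curator's allocation action with allocation_type: require, which updates each matching index with the setting index.routing.allocation.require.box_type: warm so that its shards relocate onto nodes tagged with that box_type. A quick way to confirm the setting was applied, assuming Elasticsearch is reachable on localhost:9200 without authentication (adjust for your deployment):

    # List the routing allocation settings on the aws indices.
    curl -s 'localhost:9200/so-aws-*/_settings/index.routing.allocation.*?pretty'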
salt/curator/files/action/so-azure-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-azure:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close azure indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-azure.*|so-azure.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-azure-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete azure indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-azure.*|so-azure.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-azure-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-azure:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-azure
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-barracuda-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close barracuda indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-barracuda.*|so-barracuda.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-barracuda-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete barracuda indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-barracuda.*|so-barracuda.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-barracuda-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-barracuda:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-barracuda
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-beats-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete beats indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-beats.*|so-beats.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-beats-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-beats:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-beats
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-bluecoat-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close bluecoat indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-bluecoat-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete bluecoat indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-bluecoat.*|so-bluecoat.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-bluecoat-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-bluecoat:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-bluecoat
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-cef-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cef:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close cef indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-cef.*|so-cef.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-cef-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete cef indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-cef.*|so-cef.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-cef-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cef:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-cef
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-checkpoint-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close checkpoint indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-checkpoint-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete checkpoint indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-checkpoint.*|so-checkpoint.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-checkpoint-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-checkpoint:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-checkpoint
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-cisco-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cisco:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close cisco indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-cisco.*|so-cisco.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-cisco-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete cisco indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-cisco.*|so-cisco.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-cisco-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cisco:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-cisco
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-cyberark-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close cyberark indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-cyberark.*|so-cyberark.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-cyberark-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete cyberark indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-cyberark.*|so-cyberark.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-cyberark-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cyberark:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-cyberark
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-cylance-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-cylance:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close cylance indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-cylance.*|so-cylance.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-cylance-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete cylance indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-cylance.*|so-cylance.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-cylance-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-cylance:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-cylance
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-elasticsearch-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close elasticsearch indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-elasticsearch-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete elasticsearch indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-elasticsearch.*|so-elasticsearch.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-elasticsearch-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-elasticsearch:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-elasticsearch
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-endgame-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-endgame:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close Endgame indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-endgame.*|so-endgame.*|endgame.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-endgame-delete.yml (new file, 27 lines)
@@ -0,0 +1,27 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-endgame:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete Endgame indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-endgame.*|so-endgame.*|endgame.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-endgame-warm.yml (new file, 23 lines)
@@ -0,0 +1,23 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-endgame:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-endgame.*|so-endgame.*|endgame.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-f5-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-f5:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close f5 indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-f5.*|so-f5.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-f5-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-f5:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete f5 indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-f5.*|so-f5.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-f5-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-f5:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-f5
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-firewall-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete firewall indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-firewall.*|so-firewall.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-firewall-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-firewall:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-firewall
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-fortinet-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close fortinet indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-fortinet.*|so-fortinet.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-fortinet-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete fortinet indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-fortinet.*|so-fortinet.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-fortinet-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-fortinet:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-fortinet
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-gcp-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-gcp:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close gcp indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-gcp.*|so-gcp.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-gcp-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete gcp indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-gcp.*|so-gcp.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-gcp-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-gcp:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-gcp
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
salt/curator/files/action/so-google_workspace-close.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set cur_close_days = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:close', 30) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: close
    description: >-
      Close google_workspace indices older than {{cur_close_days}} days.
    options:
      delete_aliases: False
      timeout_override:
      continue_if_exception: False
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-google_workspace.*|so-google_workspace.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{cur_close_days}}
      exclude:

salt/curator/files/action/so-google_workspace-delete.yml (new file, 29 lines)
@@ -0,0 +1,29 @@
{%- set DELETE_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:delete', 365) -%}
---
# Remember, leave a key empty if there is no value. None will be a string,
# not a Python "NoneType"
#
# Also remember that all examples have 'disable_action' set to True. If you
# want to use this action as a template, be sure to set this to False after
# copying it.
actions:
  1:
    action: delete_indices
    description: >-
      Delete google_workspace indices when older than {{ DELETE_DAYS }} days.
    options:
      ignore_empty_list: True
      disable_action: False
    filters:
    - filtertype: pattern
      kind: regex
      value: '^(logstash-google_workspace.*|so-google_workspace.*)$'
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ DELETE_DAYS }}
      exclude:

salt/curator/files/action/so-google_workspace-warm.yml (new file, 24 lines)
@@ -0,0 +1,24 @@
{%- set WARM_DAYS = salt['pillar.get']('elasticsearch:index_settings:so-google_workspace:warm', 7) -%}
actions:
  1:
    action: allocation
    description: "Apply shard allocation filtering rules to the specified indices"
    options:
      key: box_type
      value: warm
      allocation_type: require
      wait_for_completion: true
      timeout_override:
      continue_if_exception: false
      disable_action: false
    filters:
    - filtertype: pattern
      kind: prefix
      value: so-google_workspace
    - filtertype: age
      source: name
      direction: older
      timestring: '%Y.%m.%d'
      unit: days
      unit_count: {{ WARM_DAYS }}
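All of the vendor files above follow the same three-file template (close, delete, warm), with only the index prefix and pillar key changing. To exercise a rendered action file without touching any indices, curator's --dry-run flag logs what would happen; the config and action paths below are assumptions for illustration, not paths taken from this diff:

    # Dry-run a rendered close action; nothing is actually closed.
    curator --config /opt/so/conf/curator/curator.yml --dry-run \
      /opt/so/conf/curator/action/so-aws-close.yml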
Some files were not shown because too many files have changed in this diff.