Mirror of https://github.com/Security-Onion-Solutions/securityonion.git (synced 2025-12-06 17:22:49 +01:00)

Compare commits: 934 commits
Commit list: 934 commits (abbreviated SHA1s, from 36207d0440 through cfd1b82e00).
12  .github/ISSUE_TEMPLATE  vendored  Normal file
@@ -0,0 +1,12 @@
PLEASE STOP AND READ THIS INFORMATION!

If you are creating an issue just to ask a question, you will likely get faster and better responses by posting to our discussions forum instead:
https://securityonion.net/discuss

If you think you have found a possible bug or are observing a behavior that you weren't expecting, use the discussion forum to start a conversation about it instead of creating an issue.

If you are very familiar with the latest version of the product and are confident you have found a bug in Security Onion, you can continue with creating an issue here, but please make sure you have done the following:
- duplicated the issue on a fresh installation of the latest version
- provide information about your system and how you installed Security Onion
- include relevant log files
- include reproduction steps
15  .github/workflows/leaktest.yml  vendored  Normal file
@@ -0,0 +1,15 @@
name: leak-test

on: [push,pull_request]

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v2
      with:
        fetch-depth: '0'

    - name: Gitleaks
      uses: zricethezav/gitleaks-action@master
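The workflow above runs Gitleaks against the full repository history on every push and pull request (fetch-depth: '0' pulls all commits). Roughly the same scan can be reproduced locally with the Gitleaks container; this is a hedged sketch only, since CLI flags differ between Gitleaks major versions (a v8-style invocation is assumed here):

```
# Assumes Docker is available; scans the working copy mounted at /repo.
docker run --rm -v "$(pwd):/repo" zricethezav/gitleaks:latest detect --source /repo -v
```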
1  KEYS
@@ -1,4 +1,5 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----

mQINBF7rzwEBEADBg87uJhnC3Ls7s60hbHGaywGrPtbz2WuYA/ev3YS3X7WS75p8
PGlzTWUCujx0pEHbK2vYfExl3zksZ8ZmLyZ9VB3oSLiWBzJgKAeB7YCFEo8te+eE
P2Z+8c+kX4eOV+2waxZyewA2TipSkhWgStSI4Ow8SyVUcUWA3hCw7mo2duNVi7KO
11  README.md
@@ -1,7 +1,14 @@
## Security Onion 2.3.0
## Security Onion 2.3.21

Security Onion 2.3.0 is here!
Security Onion 2.3.21 is here!

## Screenshots

Alerts

Hunt

### Release Notes

@@ -1,16 +1,16 @@
### 2.3.0 ISO image built on 2020/10/15
### 2.3.21 ISO image built on 2020/12/21

### Download and Verify

2.3.0 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.0.iso
2.3.21 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.3.21.iso

MD5: E05B220E4FD7C054DF5C50906EE1375B
SHA1: 55E93C6EAB140AB4A0F07873CC871EBFDC699CD6
SHA256: 57B96A6E0951143E123BFC0CD0404F7466776E69F3C115F5A0444C0C6D5A6E32
MD5: 7B8BC5B241B7220C011215BCE852FF78
SHA1: 541C9689D8F8E8D3F25E169ED34A3F683851975B
SHA256: 7647FD67BA6AC85CCB1308789FFF7DAB19A841621FDA9AE41B89A0A79618F068

Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.0.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.21.iso.sig

Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS

@@ -24,22 +24,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma

Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.0.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.21.iso.sig
```

Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.0.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.3.21.iso
```

Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.3.0.iso.sig securityonion-2.3.0.iso
gpg --verify securityonion-2.3.21.iso.sig securityonion-2.3.21.iso
```

The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Thu 15 Oct 2020 08:06:28 PM EDT using RSA key ID FE507013
gpg: Signature made Mon 21 Dec 2020 06:27:53 PM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
```
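The published hashes above can also be checked directly; a minimal sketch, assuming the 2.3.21 ISO was downloaded to the current directory (sha256sum prints lowercase hex, so the published value is lowercased here):

```
sha256sum securityonion-2.3.21.iso
# or let sha256sum do the comparison itself:
echo "7647fd67ba6ac85ccb1308789fff7dab19a841621fda9ae41b89a0a79618f068  securityonion-2.3.21.iso" | sha256sum -c -
```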
@@ -54,7 +54,8 @@ if [ $TYPE == 'evaltab' ] || [ $TYPE == 'standalonetab' ]; then
    salt-call state.apply utility queue=True
  fi
fi
#if [ $TYPE == 'nodestab' ]; then
if [ $TYPE == 'nodestab' ]; then
  salt-call state.apply elasticsearch queue=True
# echo " nodetype: $NODETYPE" >> $local_salt_dir/pillar/data/$TYPE.sls
# echo " hotname: $HOTNAME" >> $local_salt_dir/pillar/data/$TYPE.sls
#fi
fi
|
||||
@@ -1,65 +0,0 @@
|
||||
firewall:
|
||||
analyst:
|
||||
ports:
|
||||
tcp:
|
||||
- 80
|
||||
- 443
|
||||
udp:
|
||||
beats_endpoint:
|
||||
ports:
|
||||
tcp:
|
||||
- 5044
|
||||
forward_nodes:
|
||||
ports:
|
||||
tcp:
|
||||
- 443
|
||||
- 5044
|
||||
- 5644
|
||||
- 9822
|
||||
udp:
|
||||
manager:
|
||||
ports:
|
||||
tcp:
|
||||
- 1514
|
||||
- 3200
|
||||
- 3306
|
||||
- 4200
|
||||
- 5601
|
||||
- 6379
|
||||
- 7788
|
||||
- 8086
|
||||
- 8090
|
||||
- 9001
|
||||
- 9200
|
||||
- 9300
|
||||
- 9400
|
||||
- 9500
|
||||
- 9595
|
||||
- 9696
|
||||
udp:
|
||||
- 1514
|
||||
minions:
|
||||
ports:
|
||||
tcp:
|
||||
- 3142
|
||||
- 4505
|
||||
- 4506
|
||||
- 5000
|
||||
- 8080
|
||||
- 8086
|
||||
- 55000
|
||||
osquery_endpoint:
|
||||
ports:
|
||||
tcp:
|
||||
- 8090
|
||||
search_nodes:
|
||||
ports:
|
||||
tcp:
|
||||
- 6379
|
||||
- 9300
|
||||
wazuh_endpoint:
|
||||
ports:
|
||||
tcp:
|
||||
- 1514
|
||||
udp:
|
||||
-1514
|
||||
@@ -3,7 +3,7 @@ base:
|
||||
- patch.needs_restarting
|
||||
- logrotate
|
||||
|
||||
'*_eval or *_helix or *_heavynode or *_sensor or *_standalone or *_import':
|
||||
'*_eval or *_helixsensor or *_heavynode or *_sensor or *_standalone or *_import':
|
||||
- match: compound
|
||||
- zeek
|
||||
|
||||
@@ -62,7 +62,7 @@ base:
|
||||
- global
|
||||
- minions.{{ grains.id }}
|
||||
|
||||
'*_helix':
|
||||
'*_helixsensor':
|
||||
- fireeye
|
||||
- zeeklogs
|
||||
- logstash
|
||||
@@ -82,6 +82,7 @@ base:
|
||||
- elasticsearch.search
|
||||
- global
|
||||
- minions.{{ grains.id }}
|
||||
- data.nodestab
|
||||
|
||||
'*_import':
|
||||
- zeeklogs
|
||||
|
||||
@@ -1,4 +1,64 @@
#!py

import logging


def status():
    return __salt__['cmd.run']('/usr/sbin/so-status')


def mysql_conn(retry):
    log = logging.getLogger(__name__)

    from time import sleep

    try:
        from MySQLdb import _mysql
    except ImportError as e:
        log.error(e)
        return False

    mainint = __salt__['pillar.get']('host:mainint')
    ip_arr = __salt__['grains.get']('ip4_interfaces').get(mainint)

    mysql_up = False

    if len(ip_arr) == 1:
        mainip = ip_arr[0]

        if not(retry >= 1):
            log.debug('`retry` set to value below 1, resetting it to 1 to prevent errors.')
            retry = 1

        for i in range(0, retry):
            log.debug(f'Connection attempt {i+1}')
            try:
                db = _mysql.connect(
                    host=mainip,
                    user='root',
                    passwd=__salt__['pillar.get']('secrets:mysql')
                )
                log.debug(f'Connected to MySQL server on {mainip} after {i+1} attempts.')

                db.query("""SELECT 1;""")
                log.debug(f'Successfully completed query against MySQL server on {mainip}')

                db.close()
                mysql_up = True
                break
            except _mysql.OperationalError as e:
                log.debug(e)
            except Exception as e:
                log.error('Unexpected error occured.')
                log.error(e)
                break
            sleep(1)

        if not mysql_up:
            log.error(f'Could not connect to MySQL server on {mainip} after {retry} attempts.')
    else:
        log.error(f'Main interface {mainint} has more than one IP address assigned to it, which is not supported.')
        log.debug(f'{mainint}:')
        for addr in ip_arr:
            log.debug(f' - {addr}')

    return mysql_up
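The file above appears to provide custom Salt functions (status and mysql_conn), but its path and module name are not shown in this diff. If it is synced as a custom execution module, it could be invoked roughly as follows; the module name "so" is purely illustrative:

```
# Hypothetical invocation -- 'so' stands in for whatever name the module file
# actually gets; only salt-call and saltutil.sync_modules are known commands here.
sudo salt-call saltutil.sync_modules     # push custom modules to the minion
sudo salt-call so.status                 # wraps /usr/sbin/so-status
sudo salt-call so.mysql_conn 5           # retry the local MySQL connection up to 5 times
```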
12  salt/common/files/daemon.json  Normal file
@@ -0,0 +1,12 @@
{%- set DOCKERRANGE = salt['pillar.get']('docker:range', '172.17.0.0/24') %}
{%- set DOCKERBIND = salt['pillar.get']('docker:bip', '172.17.0.1/24') %}
{
  "registry-mirrors": [ "https://:5000" ],
  "bip": "{{ DOCKERBIND }}",
  "default-address-pools": [
    {
      "base" : "{{ DOCKERRANGE }}",
      "size" : 24
    }
  ]
}
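This template is rendered onto each node as /etc/docker/daemon.json by the docker_daemon state further below, with 172.17.0.0/24 and 172.17.0.1/24 as the defaults when no docker pillar values are set. A hedged sanity-check sketch (assumes python3 is available on the node; the Salt watch on docker_daemon normally handles the restart):

```
# Verify the rendered JSON parses, then restart Docker so the new
# bip/default-address-pools settings take effect.
python3 -m json.tool /etc/docker/daemon.json >/dev/null && echo "daemon.json OK"
sudo systemctl restart docker
```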
@@ -18,6 +18,7 @@
|
||||
/opt/so/log/filebeat/*.log
|
||||
/opt/so/log/telegraf/*.log
|
||||
/opt/so/log/redis/*.log
|
||||
/opt/so/log/salt/so-salt-minion-check
|
||||
{
|
||||
{{ logrotate_conf | indent(width=4) }}
|
||||
}
|
||||
|
||||
@@ -32,6 +32,18 @@ soconfperms:
|
||||
- gid: 939
|
||||
- dir_mode: 770
|
||||
|
||||
sostatusconf:
|
||||
file.directory:
|
||||
- name: /opt/so/conf/so-status
|
||||
- uid: 939
|
||||
- gid: 939
|
||||
- dir_mode: 770
|
||||
|
||||
so-status.conf:
|
||||
file.touch:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- unless: ls /opt/so/conf/so-status/so-status.conf
|
||||
|
||||
sosaltstackperms:
|
||||
file.directory:
|
||||
- name: /opt/so/saltstack
|
||||
@@ -99,7 +111,7 @@ heldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
- containerd.io: 1.2.13-2
|
||||
- docker-ce: 5:19.03.12~3-0~ubuntu-bionic
|
||||
- docker-ce: 5:19.03.14~3-0~ubuntu-bionic
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
|
||||
@@ -135,7 +147,7 @@ heldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
- containerd.io: 1.2.13-3.2.el7
|
||||
- docker-ce: 3:19.03.12-3.el7
|
||||
- docker-ce: 3:19.03.14-3.el7
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
{% endif %}
|
||||
@@ -158,8 +170,8 @@ Etc/UTC:
|
||||
utilsyncscripts:
|
||||
file.recurse:
|
||||
- name: /usr/sbin
|
||||
- user: 0
|
||||
- group: 0
|
||||
- user: root
|
||||
- group: root
|
||||
- file_mode: 755
|
||||
- template: jinja
|
||||
- source: salt://common/tools/sbin
|
||||
@@ -232,10 +244,19 @@ commonlogrotateconf:
|
||||
- dayweek: '*'
|
||||
{% endif %}
|
||||
|
||||
# Manager daemon.json
|
||||
docker_daemon:
|
||||
file.managed:
|
||||
- source: salt://common/files/daemon.json
|
||||
- name: /etc/docker/daemon.json
|
||||
- template: jinja
|
||||
|
||||
# Make sure Docker is always running
|
||||
docker:
|
||||
service.running:
|
||||
- enable: True
|
||||
- watch:
|
||||
- file: docker_daemon
|
||||
|
||||
{% else %}
|
||||
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-domainstats'
|
||||
]
|
||||
} %}
|
||||
@@ -1,20 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-filebeat',
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-dockerregistry',
|
||||
'so-soc',
|
||||
'so-kratos',
|
||||
'so-idstools',
|
||||
'so-elasticsearch',
|
||||
'so-kibana',
|
||||
'so-steno',
|
||||
'so-suricata',
|
||||
'so-zeek',
|
||||
'so-curator',
|
||||
'so-elastalert',
|
||||
'so-soctopus',
|
||||
'so-sensoroni'
|
||||
]
|
||||
} %}
|
||||
@@ -1,10 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-mysql',
|
||||
'so-fleet',
|
||||
'so-redis',
|
||||
'so-filebeat',
|
||||
'so-nginx',
|
||||
'so-telegraf'
|
||||
]
|
||||
} %}
|
||||
@@ -1,7 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-mysql',
|
||||
'so-fleet',
|
||||
'so-redis'
|
||||
]
|
||||
} %}
|
||||
@@ -1,5 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-freqserver'
|
||||
]
|
||||
} %}
|
||||
@@ -1,6 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-influxdb',
|
||||
'so-grafana'
|
||||
]
|
||||
} %}
|
||||
@@ -1,15 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-redis',
|
||||
'so-logstash',
|
||||
'so-elasticsearch',
|
||||
'so-curator',
|
||||
'so-steno',
|
||||
'so-suricata',
|
||||
'so-wazuh',
|
||||
'so-filebeat',
|
||||
'so-sensoroni'
|
||||
]
|
||||
} %}
|
||||
@@ -1,12 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-idstools',
|
||||
'so-steno',
|
||||
'so-zeek',
|
||||
'so-redis',
|
||||
'so-logstash',
|
||||
'so-filebeat
|
||||
]
|
||||
} %}
|
||||
@@ -1,9 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-logstash',
|
||||
'so-elasticsearch',
|
||||
'so-curator',
|
||||
]
|
||||
} %}
|
||||
@@ -1,10 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-filebeat',
|
||||
'so-nginx',
|
||||
'so-soc',
|
||||
'so-kratos',
|
||||
'so-elasticsearch',
|
||||
'so-kibana'
|
||||
]
|
||||
} %}
|
||||
@@ -1,18 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-dockerregistry',
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-soc',
|
||||
'so-kratos',
|
||||
'so-aptcacherng',
|
||||
'so-idstools',
|
||||
'so-redis',
|
||||
'so-elasticsearch',
|
||||
'so-logstash',
|
||||
'so-kibana',
|
||||
'so-elastalert',
|
||||
'so-filebeat',
|
||||
'so-soctopus'
|
||||
]
|
||||
} %}
|
||||
@@ -1,18 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-soc',
|
||||
'so-kratos',
|
||||
'so-aptcacherng',
|
||||
'so-idstools',
|
||||
'so-redis',
|
||||
'so-logstash',
|
||||
'so-elasticsearch',
|
||||
'so-curator',
|
||||
'so-kibana',
|
||||
'so-elastalert',
|
||||
'so-filebeat',
|
||||
'so-soctopus'
|
||||
]
|
||||
} %}
|
||||
@@ -1,5 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-zeek'
|
||||
]
|
||||
} %}
|
||||
@@ -1,5 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-playbook'
|
||||
]
|
||||
} %}
|
||||
@@ -1,10 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-logstash',
|
||||
'so-elasticsearch',
|
||||
'so-curator',
|
||||
'so-filebeat'
|
||||
]
|
||||
} %}
|
||||
@@ -1,9 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-telegraf',
|
||||
'so-steno',
|
||||
'so-suricata',
|
||||
'so-filebeat',
|
||||
'so-sensoroni'
|
||||
]
|
||||
} %}
|
||||
@@ -1,48 +0,0 @@
|
||||
{% set role = grains.id.split('_') | last %}
|
||||
{% from 'common/maps/'~ role ~'.map.jinja' import docker with context %}
|
||||
|
||||
# Check if the service is enabled and append it's required containers
|
||||
# to the list predefined by the role / minion id affix
|
||||
{% macro append_containers(pillar_name, k, compare )%}
|
||||
{% if salt['pillar.get'](pillar_name~':'~k, {}) != compare %}
|
||||
{% if k == 'enabled' %}
|
||||
{% set k = pillar_name %}
|
||||
{% endif %}
|
||||
{% from 'common/maps/'~k~'.map.jinja' import docker as d with context %}
|
||||
{% for li in d['containers'] %}
|
||||
{{ docker['containers'].append(li) }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% endmacro %}
|
||||
|
||||
{% set docker = salt['grains.filter_by']({
|
||||
'*_'~role: {
|
||||
'containers': docker['containers']
|
||||
}
|
||||
},grain='id', merge=salt['pillar.get']('docker')) %}
|
||||
|
||||
{% if role in ['eval', 'managersearch', 'manager', 'standalone'] %}
|
||||
{{ append_containers('manager', 'grafana', 0) }}
|
||||
{{ append_containers('global', 'fleet_manager', 0) }}
|
||||
{{ append_containers('global', 'wazuh', 0) }}
|
||||
{{ append_containers('manager', 'thehive', 0) }}
|
||||
{{ append_containers('manager', 'playbook', 0) }}
|
||||
{{ append_containers('manager', 'freq', 0) }}
|
||||
{{ append_containers('manager', 'domainstats', 0) }}
|
||||
{% endif %}
|
||||
|
||||
{% if role in ['eval', 'heavynode', 'sensor', 'standalone'] %}
|
||||
{{ append_containers('strelka', 'enabled', 0) }}
|
||||
{% endif %}
|
||||
|
||||
{% if role in ['heavynode', 'standalone'] %}
|
||||
{{ append_containers('global', 'mdengine', 'SURICATA') }}
|
||||
{% endif %}
|
||||
|
||||
{% if role == 'searchnode' %}
|
||||
{{ append_containers('manager', 'wazuh', 0) }}
|
||||
{% endif %}
|
||||
|
||||
{% if role == 'sensor' %}
|
||||
{{ append_containers('global', 'mdengine', 'SURICATA') }}
|
||||
{% endif %}
|
||||
@@ -1,22 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-soc',
|
||||
'so-kratos',
|
||||
'so-aptcacherng',
|
||||
'so-idstools',
|
||||
'so-redis',
|
||||
'so-logstash',
|
||||
'so-elasticsearch',
|
||||
'so-curator',
|
||||
'so-kibana',
|
||||
'so-elastalert',
|
||||
'so-filebeat',
|
||||
'so-suricata',
|
||||
'so-steno',
|
||||
'so-dockerregistry',
|
||||
'so-soctopus',
|
||||
'so-sensoroni'
|
||||
]
|
||||
} %}
|
||||
@@ -1,9 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-strelka-coordinator',
|
||||
'so-strelka-gatekeeper',
|
||||
'so-strelka-manager',
|
||||
'so-strelka-frontend',
|
||||
'so-strelka-filestream'
|
||||
]
|
||||
} %}
|
||||
@@ -1,7 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-thehive',
|
||||
'so-thehive-es',
|
||||
'so-cortex'
|
||||
]
|
||||
} %}
|
||||
@@ -1,7 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-nginx',
|
||||
'so-telegraf',
|
||||
'so-elasticsearch'
|
||||
]
|
||||
} %}
|
||||
@@ -1,5 +0,0 @@
|
||||
{% set docker = {
|
||||
'containers': [
|
||||
'so-wazuh'
|
||||
]
|
||||
} %}
|
||||
@@ -1,8 +0,0 @@
#!/bin/bash

if [ ! -f /opt/so/state/dockernet.state ]; then
  docker network create -d bridge so-elastic-net
  touch /opt/so/state/dockernet.state
else
  exit
fi
@@ -84,7 +84,7 @@ while [[ $INSTALL != "yes" ]] && [[ $INSTALL != "no" ]]; do
echo "## ##"
echo "## Installing the Security Onion ##"
echo "## analyst node on this device will ##"
echo "## make permanenet changes to ##"
echo "## make permanent changes to ##"
echo "## the system. ##"
echo "## ##"
echo "###########################################"
@@ -15,12 +15,10 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

IMAGEREPO=securityonion

# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run using sudo!"
  exit 1
fi

# Define a banner to separate sections
@@ -31,14 +29,43 @@ header() {
  printf '%s\n' "$banner" "$*" "$banner"
}

lookup_salt_value() {
  key=$1
  group=$2
  kind=$3

  if [ -z "$kind" ]; then
    kind=pillar
  fi

  if [ -n "$group" ]; then
    group=${group}:
  fi

  salt-call --no-color ${kind}.get ${group}${key} --out=newline_values_only
}

lookup_pillar() {
  key=$1
  salt-call --no-color pillar.get global:${key} --out=newline_values_only
  key=$1
  pillar=$2
  if [ -z "$pillar" ]; then
    pillar=global
  fi
  lookup_salt_value "$key" "$pillar" "pillar"
}

lookup_pillar_secret() {
  key=$1
  salt-call --no-color pillar.get secrets:${key} --out=newline_values_only
  lookup_pillar "$1" "secrets"
}

lookup_grain() {
  lookup_salt_value "$1" "" "grains"
}

lookup_role() {
  id=$(lookup_grain id)
  pieces=($(echo $id | tr '_' ' '))
  echo ${pieces[1]}
}

check_container() {
@@ -47,7 +74,69 @@ check_container() {
}

check_password() {
  local password=$1
  echo "$password" | egrep -v "'|\"|\\\\" > /dev/null 2>&1
  return $?
  local password=$1
  echo "$password" | egrep -v "'|\"|\\$|\\\\" > /dev/null 2>&1
  return $?
}

set_os() {
  if [ -f /etc/redhat-release ]; then
    OS=centos
  else
    OS=ubuntu
  fi
}

set_minionid() {
  MINIONID=$(lookup_grain id)
}

set_version() {
  CURRENTVERSION=0.0.0
  if [ -f /etc/soversion ]; then
    CURRENTVERSION=$(cat /etc/soversion)
  fi
  if [ -z "$VERSION" ]; then
    if [ -z "$NEWVERSION" ]; then
      if [ "$CURRENTVERSION" == "0.0.0" ]; then
        echo "ERROR: Unable to detect Security Onion version; terminating script."
        exit 1
      else
        VERSION=$CURRENTVERSION
      fi
    else
      VERSION="$NEWVERSION"
    fi
  fi
}

require_manager() {
  # Check to see if this is a manager
  MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
  if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ] || [ $MANAGERCHECK == 'so-import' ]; then
    echo "This is a manager, We can proceed."
  else
    echo "Please run this command on the manager; the manager controls the grid."
    exit 1
  fi
}

is_single_node_grid() {
  role=$(lookup_role)
  if [ "$role" != "eval" ] && [ "$role" != "standalone" ] && [ "$role" != "import" ]; then
    return 1
  fi
  return 0
}

fail() {
  msg=$1
  echo "ERROR: $msg"
  echo "Exiting."
  exit 1
}

get_random_value() {
  length=${1:-20}
  head -c 5000 /dev/urandom | tr -dc 'a-zA-Z0-9' | fold -w $length | head -n 1
}
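The helpers above are shared by the other so-* scripts in this diff (the Cortex scripts below already call lookup_pillar, for example). A short hedged sketch of how a script might combine them, using only keys that appear elsewhere in this changeset:

```
# Illustrative only: how another so-* script might consume these helpers.
. /usr/sbin/so-common

MANAGER_IP=$(lookup_pillar managerip)      # reads global:managerip from pillar
MYSQL_PASS=$(lookup_pillar_secret mysql)   # reads secrets:mysql
ROLE=$(lookup_role)                        # e.g. "standalone" from a minion id like "host_standalone"
TOKEN=$(get_random_value 32)               # 32-character alphanumeric string

require_manager                            # bail out unless run on a manager node
if is_single_node_grid; then
  echo "Single-node grid ($ROLE) at $MANAGER_IP"
fi
```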
@@ -31,7 +31,7 @@ fi
|
||||
USER=$1
|
||||
|
||||
CORTEX_KEY=$(lookup_pillar cortexkey)
|
||||
CORTEX_IP=$(lookup_pillar managerip)
|
||||
CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
|
||||
CORTEX_ORG_NAME=$(lookup_pillar cortexorgname)
|
||||
CORTEX_USER=$USER
|
||||
|
||||
@@ -43,7 +43,7 @@ fi
|
||||
read -rs CORTEX_PASS
|
||||
|
||||
# Create new user in Cortex
|
||||
resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
|
||||
resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }")
|
||||
if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
|
||||
echo "Successfully added user to Cortex."
|
||||
else
|
||||
|
||||
@@ -31,7 +31,7 @@ fi
|
||||
USER=$1
|
||||
|
||||
CORTEX_KEY=$(lookup_pillar cortexkey)
|
||||
CORTEX_IP=$(lookup_pillar managerip)
|
||||
CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api"
|
||||
CORTEX_USER=$USER
|
||||
|
||||
case "${2^^}" in
|
||||
@@ -46,7 +46,7 @@ case "${2^^}" in
|
||||
;;
|
||||
esac
|
||||
|
||||
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" "https://$CORTEX_IP/cortex/api/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
|
||||
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }")
|
||||
if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
|
||||
echo "Successfully updated user in Cortex."
|
||||
else
|
||||
|
||||
@@ -16,96 +16,7 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
. /usr/sbin/so-image-common
|
||||
|
||||
manager_check() {
|
||||
# Check to see if this is a manager
|
||||
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
|
||||
if [ $MANAGERCHECK == 'so-eval' ] || [ $MANAGERCHECK == 'so-manager' ] || [ $MANAGERCHECK == 'so-managersearch' ] || [ $MANAGERCHECK == 'so-standalone' ] || [ $MANAGERCHECK == 'so-helix' ]; then
|
||||
echo "This is a manager. We can proceed"
|
||||
else
|
||||
echo "Please run soup on the manager. The manager controls all updates."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
update_docker_containers() {
|
||||
|
||||
# Download the containers from the interwebs
|
||||
for i in "${TRUSTED_CONTAINERS[@]}"
|
||||
do
|
||||
# Pull down the trusted docker image
|
||||
echo "Downloading $i"
|
||||
docker pull --disable-content-trust=false docker.io/$IMAGEREPO/$i
|
||||
# Tag it with the new registry destination
|
||||
docker tag $IMAGEREPO/$i $HOSTNAME:5000/$IMAGEREPO/$i
|
||||
docker push $HOSTNAME:5000/$IMAGEREPO/$i
|
||||
done
|
||||
|
||||
}
|
||||
|
||||
version_check() {
|
||||
if [ -f /etc/soversion ]; then
|
||||
VERSION=$(cat /etc/soversion)
|
||||
else
|
||||
echo "Unable to detect version. I will now terminate."
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
manager_check
|
||||
version_check
|
||||
|
||||
# Use the hostname
|
||||
HOSTNAME=$(hostname)
|
||||
# List all the containers
|
||||
if [ $MANAGERCHECK != 'so-helix' ]; then
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-acng:$VERSION" \
|
||||
"so-thehive-cortex:$VERSION" \
|
||||
"so-curator:$VERSION" \
|
||||
"so-domainstats:$VERSION" \
|
||||
"so-elastalert:$VERSION" \
|
||||
"so-elasticsearch:$VERSION" \
|
||||
"so-filebeat:$VERSION" \
|
||||
"so-fleet:$VERSION" \
|
||||
"so-fleet-launcher:$VERSION" \
|
||||
"so-freqserver:$VERSION" \
|
||||
"so-grafana:$VERSION" \
|
||||
"so-idstools:$VERSION" \
|
||||
"so-influxdb:$VERSION" \
|
||||
"so-kibana:$VERSION" \
|
||||
"so-kratos:$VERSION" \
|
||||
"so-logstash:$VERSION" \
|
||||
"so-minio:$VERSION" \
|
||||
"so-mysql:$VERSION" \
|
||||
"so-nginx:$VERSION" \
|
||||
"so-pcaptools:$VERSION" \
|
||||
"so-playbook:$VERSION" \
|
||||
"so-redis:$VERSION" \
|
||||
"so-soc:$VERSION" \
|
||||
"so-soctopus:$VERSION" \
|
||||
"so-steno:$VERSION" \
|
||||
"so-strelka-frontend:$VERSION" \
|
||||
"so-strelka-manager:$VERSION" \
|
||||
"so-strelka-backend:$VERSION" \
|
||||
"so-strelka-filestream:$VERSION" \
|
||||
"so-suricata:$VERSION" \
|
||||
"so-telegraf:$VERSION" \
|
||||
"so-thehive:$VERSION" \
|
||||
"so-thehive-es:$VERSION" \
|
||||
"so-wazuh:$VERSION" \
|
||||
"so-zeek:$VERSION" )
|
||||
else
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-filebeat:$VERSION" \
|
||||
"so-idstools:$VERSION" \
|
||||
"so-logstash:$VERSION" \
|
||||
"so-nginx:$VERSION" \
|
||||
"so-redis:$VERSION" \
|
||||
"so-steno:$VERSION" \
|
||||
"so-suricata:$VERSION" \
|
||||
"so-telegraf:$VERSION" \
|
||||
"so-zeek:$VERSION" )
|
||||
fi
|
||||
|
||||
update_docker_containers
|
||||
require_manager
|
||||
update_docker_containers "refresh"
|
||||
|
||||
@@ -19,8 +19,7 @@
|
||||
#
|
||||
# Purpose: This script will allow you to test your elastalert rule without entering the Docker container.
|
||||
|
||||
. /usr/sbin/so-elastic-common
|
||||
|
||||
HOST_RULE_DIR=/opt/so/rules/elastalert
|
||||
OPTIONS=""
|
||||
SKIP=0
|
||||
RESULTS_TO_LOG="n"
|
||||
@@ -29,114 +28,109 @@ FILE_SAVE_LOCATION=""
|
||||
|
||||
usage()
|
||||
{
|
||||
cat <<EOF
|
||||
cat <<EOF
|
||||
|
||||
Test Elastalert Rule
|
||||
Options:
|
||||
-h This message
|
||||
-a Trigger real alerts instead of the debug alert
|
||||
-l <path_to_file> Write results to specified log file
|
||||
-o '<options>' Specify Elastalert options ( Ex. --schema-only , --count-only, --days N )
|
||||
-r <rule_name> Specify path/name of rule to test
|
||||
-h This message
|
||||
-a Trigger real alerts instead of the debug alert
|
||||
-l <path_to_file> Write results to specified log file
|
||||
-o '<options>' Specify Elastalert options ( Ex. --schema-only , --count-only, --days N )
|
||||
-r <rule_name> Specify filename of rule to test (must exist in $HOST_RULE_DIR; do not include path)
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
while getopts "hal:o:r:" OPTION
|
||||
do
|
||||
case $OPTION in
|
||||
h)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
a)
|
||||
OPTIONS="--alert"
|
||||
;;
|
||||
l)
|
||||
RESULTS_TO_LOG="y"
|
||||
FILE_SAVE_LOCATION=$OPTARG
|
||||
;;
|
||||
|
||||
o)
|
||||
OPTIONS=$OPTARG
|
||||
;;
|
||||
|
||||
r)
|
||||
RULE_NAME=$OPTARG
|
||||
SKIP=1
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
case $OPTION in
|
||||
h)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
a)
|
||||
OPTIONS="--alert"
|
||||
;;
|
||||
l)
|
||||
RESULTS_TO_LOG="y"
|
||||
FILE_SAVE_LOCATION=$OPTARG
|
||||
;;
|
||||
o)
|
||||
OPTIONS=$OPTARG
|
||||
;;
|
||||
r)
|
||||
RULE_NAME=$OPTARG
|
||||
SKIP=1
|
||||
;;
|
||||
*)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
docker_exec(){
|
||||
if [ ${RESULTS_TO_LOG,,} = "y" ] ; then
|
||||
docker exec -it so-elastalert bash -c "elastalert-test-rule $RULE_NAME $OPTIONS" > $FILE_SAVE_LOCATION
|
||||
CMD="docker exec -it so-elastalert elastalert-test-rule /opt/elastalert/rules/$RULE_NAME --config /opt/config/elastalert_config.yaml $OPTIONS"
|
||||
if [ "${RESULTS_TO_LOG,,}" = "y" ] ; then
|
||||
$CMD > "$FILE_SAVE_LOCATION"
|
||||
else
|
||||
docker exec -it so-elastalert bash -c "elastalert-test-rule $RULE_NAME $OPTIONS"
|
||||
$CMD
|
||||
fi
|
||||
}
|
||||
|
||||
rule_prompt(){
|
||||
CURRENT_RULES=$(find /opt/so/rules/elastalert -name "*.yaml")
|
||||
echo
|
||||
echo "This script will allow you to test an Elastalert rule."
|
||||
echo
|
||||
echo "Below is a list of active Elastalert rules:"
|
||||
echo
|
||||
CURRENT_RULES=$(cd "$HOST_RULE_DIR" && find . -type f \( -name "*.yaml" -o -name "*.yml" \) | sed -e 's/^\.\///')
|
||||
if [ -z "$CURRENT_RULES" ]; then
|
||||
echo "There are no rules available to test. Rule files must be placed in the $HOST_RULE_DIR directory."
|
||||
exit 1
|
||||
fi
|
||||
echo
|
||||
echo "This script will allow you to test an Elastalert rule."
|
||||
echo
|
||||
echo "Below is a list of available Elastalert rules:"
|
||||
echo
|
||||
echo "-----------------------------------"
|
||||
echo
|
||||
echo "$CURRENT_RULES"
|
||||
echo
|
||||
echo
|
||||
echo "$CURRENT_RULES"
|
||||
echo
|
||||
echo "-----------------------------------"
|
||||
echo
|
||||
echo "Note: To test a rule it must be accessible by the Elastalert Docker container."
|
||||
echo
|
||||
echo "Make sure to swap the local path (/opt/so/rules/elastalert/) for the docker path (/etc/elastalert/rules/)"
|
||||
echo "Example: /opt/so/rules/elastalert/nids2hive.yaml would be /etc/elastalert/rules/nids2hive.yaml"
|
||||
echo
|
||||
while [ -z $RULE_NAME ]; do
|
||||
echo "Please enter the file path and rule name you want to test."
|
||||
read -e RULE_NAME
|
||||
echo
|
||||
while [ -z "$RULE_NAME" ]; do
|
||||
read -p "Please enter the rule filename you want to test (filename only, no path): " -e RULE_NAME
|
||||
done
|
||||
}
|
||||
|
||||
log_save_prompt(){
|
||||
RESULTS_TO_LOG=""
|
||||
while [ -z $RESULTS_TO_LOG ]; do
|
||||
echo "The results can be rather long. Would you like to write the results to a file? (Y/N)"
|
||||
read RESULTS_TO_LOG
|
||||
done
|
||||
read -p "The results can be rather long. Would you like to write the results to a file? (y/N) " -e RESULTS_TO_LOG
|
||||
}
|
||||
|
||||
log_path_prompt(){
|
||||
while [ -z $FILE_SAVE_LOCATION ]; do
|
||||
echo "Please enter the file path and file name."
|
||||
read -e FILE_SAVE_LOCATION
|
||||
done
|
||||
while [ -z "$FILE_SAVE_LOCATION" ]; do
|
||||
read -p "Please enter the log file path and file name: " -e FILE_SAVE_LOCATION
|
||||
done
|
||||
echo "Depending on the rule this may take a while."
|
||||
}
|
||||
|
||||
if [ $SKIP -eq 0 ]; then
|
||||
rule_prompt
|
||||
log_save_prompt
|
||||
if [ ${RESULTS_TO_LOG,,} = "y" ] ; then
|
||||
log_path_prompt
|
||||
fi
|
||||
if [ "${RESULTS_TO_LOG,,}" = "y" ] ; then
|
||||
log_path_prompt
|
||||
fi
|
||||
fi
|
||||
|
||||
echo
|
||||
|
||||
docker_exec
|
||||
|
||||
if [ $? -eq 0 ]; then
|
||||
echo "Test completed successfully!"
|
||||
else
|
||||
echo "Something went wrong..."
|
||||
fi
|
||||
RESULT=$?
|
||||
|
||||
echo
|
||||
|
||||
if [ $RESULT -eq 0 ]; then
|
||||
echo "Test completed successfully!"
|
||||
else
|
||||
echo "Test failed."
|
||||
fi
|
||||
|
||||
|
||||
echo
|
||||
@@ -51,9 +51,9 @@ if [ $SKIP -ne 1 ]; then
|
||||
# List indices
|
||||
echo
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -k https://{{ NODEIP }}:9200/_cat/indices?v
|
||||
curl -k -L https://{{ NODEIP }}:9200/_cat/indices?v
|
||||
{% else %}
|
||||
curl {{ NODEIP }}:9200/_cat/indices?v
|
||||
curl -L {{ NODEIP }}:9200/_cat/indices?v
|
||||
{% endif %}
|
||||
echo
|
||||
# Inform user we are about to delete all data
|
||||
@@ -94,16 +94,16 @@ fi
|
||||
echo "Deleting data..."
|
||||
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
INDXS=$(curl -s -XGET -k https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
|
||||
INDXS=$(curl -s -XGET -k -L https://{{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
|
||||
{% else %}
|
||||
INDXS=$(curl -s -XGET {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
|
||||
INDXS=$(curl -s -XGET -L {{ NODEIP }}:9200/_cat/indices?v | egrep 'logstash|elastalert|so-' | awk '{ print $3 }')
|
||||
{% endif %}
|
||||
for INDX in ${INDXS}
|
||||
do
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -XDELETE -k https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
|
||||
curl -XDELETE -k -L https://"{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
|
||||
{% else %}
|
||||
curl -XDELETE "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
|
||||
curl -XDELETE -L "{{ NODEIP }}:9200/${INDX}" > /dev/null 2>&1
|
||||
{% endif %}
|
||||
done
|
||||
|
||||
|
||||
43
salt/common/tools/sbin/so-elastic-restart
Executable file
43
salt/common/tools/sbin/so-elastic-restart
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
|
||||
/usr/sbin/so-restart elasticsearch $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
|
||||
/usr/sbin/so-restart kibana $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
|
||||
/usr/sbin/so-restart logstash $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
|
||||
/usr/sbin/so-restart filebeat $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
|
||||
/usr/sbin/so-restart curator $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
|
||||
/usr/sbin/so-restart elastalert $1
|
||||
{%- endif %}
|
||||
43
salt/common/tools/sbin/so-elastic-start
Executable file
43
salt/common/tools/sbin/so-elastic-start
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
|
||||
/usr/sbin/so-start elasticsearch $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
|
||||
/usr/sbin/so-start kibana $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
|
||||
/usr/sbin/so-start logstash $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
|
||||
/usr/sbin/so-start filebeat $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
|
||||
/usr/sbin/so-start curator $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
|
||||
/usr/sbin/so-start elastalert $1
|
||||
{%- endif %}
|
||||
43
salt/common/tools/sbin/so-elastic-stop
Executable file
43
salt/common/tools/sbin/so-elastic-stop
Executable file
@@ -0,0 +1,43 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-import']%}
|
||||
/usr/sbin/so-stop elasticsearch $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval', 'so-manager', 'so-managersearch', 'so-standalone', 'so-import']%}
|
||||
/usr/sbin/so-stop kibana $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
|
||||
/usr/sbin/so-stop logstash $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node', 'so-sensor']%}
|
||||
/usr/sbin/so-stop filebeat $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-heavynode', 'so-node']%}
|
||||
/usr/sbin/so-stop curator $1
|
||||
{%- endif %}
|
||||
|
||||
{%- if grains['role'] in ['so-eval','so-manager', 'so-managersearch', 'so-standalone']%}
|
||||
/usr/sbin/so-stop elastalert $1
|
||||
{%- endif %}
|
||||
@@ -22,5 +22,5 @@ THEHIVEESPORT=9400
|
||||
echo "Removing read only attributes for indices..."
|
||||
echo
|
||||
for p in $ESPORT $THEHIVEESPORT; do
|
||||
curl -XPUT -H "Content-Type: application/json" http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
|
||||
curl -XPUT -H "Content-Type: application/json" -L http://$IP:$p/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi;
|
||||
done
|
||||
|
||||
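The recurring change in this and the following hunks is the addition of curl's -L flag so that each request follows HTTP redirects instead of stopping at a 3xx response from the endpoint. A minimal sketch of the adjusted call, using the variables already defined in this script:

curl -XPUT -H "Content-Type: application/json" -L \
  "http://$IP:$p/_all/_settings" \
  -d '{"index.blocks.read_only_allow_delete": null}'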
@@ -20,14 +20,14 @@
|
||||
|
||||
if [ "$1" == "" ]; then
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
|
||||
{% else %}
|
||||
curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
|
||||
curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines"
|
||||
{% endif %}
|
||||
else
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -s -k https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
|
||||
{% else %}
|
||||
curl -s {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
|
||||
curl -s -L {{ NODEIP }}:9200/_nodes/stats | jq .nodes | jq ".[] | .ingest.pipelines.\"$1\""
|
||||
{% endif %}
|
||||
fi
|
||||
|
||||
@@ -18,14 +18,14 @@
|
||||
. /usr/sbin/so-common
|
||||
if [ "$1" == "" ]; then
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
|
||||
{% else %}
|
||||
curl -s {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
|
||||
curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/* | jq 'keys'
|
||||
{% endif %}
|
||||
else
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -s -k https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
|
||||
{% else %}
|
||||
curl -s {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
|
||||
curl -s -L {{ NODEIP }}:9200/_ingest/pipeline/$1 | jq
|
||||
{% endif %}
|
||||
fi
|
||||
|
||||
@@ -18,14 +18,14 @@
|
||||
. /usr/sbin/so-common
|
||||
if [ "$1" == "" ]; then
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -s -k https://{{ NODEIP }}:9200/_template/* | jq 'keys'
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_template/* | jq 'keys'
|
||||
{% else %}
|
||||
curl -s {{ NODEIP }}:9200/_template/* | jq 'keys'
|
||||
curl -s -L {{ NODEIP }}:9200/_template/* | jq 'keys'
|
||||
{% endif %}
|
||||
else
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -s -k https://{{ NODEIP }}:9200/_template/$1 | jq
|
||||
curl -s -k -L https://{{ NODEIP }}:9200/_template/$1 | jq
|
||||
{% else %}
|
||||
curl -s {{ NODEIP }}:9200/_template/$1 | jq
|
||||
curl -s -L {{ NODEIP }}:9200/_template/$1 | jq
|
||||
{% endif %}
|
||||
fi
|
||||
|
||||
@@ -31,9 +31,9 @@ COUNT=0
|
||||
ELASTICSEARCH_CONNECTED="no"
|
||||
while [[ "$COUNT" -le 240 ]]; do
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
curl -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
{% else %}
|
||||
curl --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
curl --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
{% endif %}
|
||||
if [ $? -eq 0 ]; then
|
||||
ELASTICSEARCH_CONNECTED="yes"
|
||||
@@ -56,9 +56,9 @@ cd ${ELASTICSEARCH_TEMPLATES}
|
||||
|
||||
echo "Loading templates..."
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
|
||||
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
|
||||
{% else %}
|
||||
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
|
||||
for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; curl ${ELASTICSEARCH_AUTH} -s -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done
|
||||
{% endif %}
|
||||
echo
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
. /usr/sbin/so-image-common
|
||||
local_salt_dir=/opt/so/saltstack/local
|
||||
|
||||
cat << EOF
|
||||
@@ -39,34 +40,14 @@ fi
|
||||
|
||||
echo "Please wait while switching to Elastic Features."
|
||||
|
||||
manager_check() {
|
||||
# Check to see if this is a manager
|
||||
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
|
||||
if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch')$ ]]; then
|
||||
echo "This is a manager. We can proceed"
|
||||
else
|
||||
echo "Please run so-features-enable on the manager."
|
||||
exit 0
|
||||
fi
|
||||
}
|
||||
require_manager
|
||||
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-elasticsearch" \
|
||||
"so-filebeat" \
|
||||
"so-kibana" \
|
||||
"so-logstash" )
|
||||
update_docker_containers "features" "-features"
|
||||
|
||||
manager_check
|
||||
VERSION=$(grep soversion $local_salt_dir/pillar/global.sls | cut -d':' -f2|sed 's/ //g')
|
||||
# Modify global.sls to enable Features
|
||||
sed -i 's/features: False/features: True/' $local_salt_dir/pillar/global.sls
|
||||
SUFFIX="-features"
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-elasticsearch:$VERSION$SUFFIX" \
|
||||
"so-filebeat:$VERSION$SUFFIX" \
|
||||
"so-kibana:$VERSION$SUFFIX" \
|
||||
"so-logstash:$VERSION$SUFFIX" )
|
||||
|
||||
for i in "${TRUSTED_CONTAINERS[@]}"
|
||||
do
|
||||
# Pull down the trusted docker image
|
||||
echo "Downloading $i"
|
||||
docker pull --disable-content-trust=false docker.io/$IMAGEREPO/$i
|
||||
# Tag it with the new registry destination
|
||||
docker tag $IMAGEREPO/$i $HOSTNAME:5000/$IMAGEREPO/$i
|
||||
docker push $HOSTNAME:5000/$IMAGEREPO/$i
|
||||
done
|
||||
|
||||
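With this change so-features-enable drops its private pull/tag/push loop and the inline manager_check in favor of the shared helpers sourced from so-image-common. A sketch of the resulting call sequence, as the new lines in this hunk read:

. /usr/sbin/so-common
. /usr/sbin/so-image-common
require_manager                                  # abort unless run on a manager role
TRUSTED_CONTAINERS=( "so-elasticsearch" "so-filebeat" "so-kibana" "so-logstash" )
update_docker_containers "features" "-features"  # CURLTYPE, IMAGE_TAG_SUFFIX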
@@ -116,7 +116,7 @@ def addhostgroup(args):
|
||||
print('Missing host group name argument', file=sys.stderr)
|
||||
showUsage(args)
|
||||
|
||||
name = args[1]
|
||||
name = args[0]
|
||||
content = loadYaml(hostgroupsFilename)
|
||||
if name in content['firewall']['hostgroups']:
|
||||
print('Already exists', file=sys.stderr)
|
||||
|
||||
@@ -15,8 +15,8 @@ if [ ! "$(docker ps -q -f name=so-fleet)" ]; then
|
||||
salt-call state.apply redis queue=True >> /root/fleet-setup.log
|
||||
fi
|
||||
|
||||
docker exec so-fleet fleetctl config set --address https://localhost:8080 --tls-skip-verify --url-prefix /fleet
|
||||
docker exec -it so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://localhost:8080/fleet)" != "301" ]]; do sleep 5; done'
|
||||
docker exec so-fleet fleetctl config set --address https://127.0.0.1:8080 --tls-skip-verify --url-prefix /fleet
|
||||
docker exec -it so-fleet bash -c 'while [[ "$(curl -s -o /dev/null --insecure -w ''%{http_code}'' https://127.0.0.1:8080/fleet)" != "301" ]]; do sleep 5; done'
|
||||
docker exec so-fleet fleetctl setup --email $1 --password $2
|
||||
|
||||
docker exec so-fleet fleetctl apply -f /packs/palantir/Fleet/Endpoints/MacOS/osquery.yaml
|
||||
@@ -26,9 +26,9 @@ docker exec so-fleet /bin/sh -c 'for pack in /packs/palantir/Fleet/Endpoints/pac
|
||||
docker exec so-fleet fleetctl apply -f /packs/osquery-config.conf
|
||||
|
||||
|
||||
# Enable Fleet
|
||||
echo "Enabling Fleet..."
|
||||
salt-call state.apply fleet.event_enable-fleet queue=True >> /root/fleet-setup.log
|
||||
# Update the Enroll Secret
|
||||
echo "Updating the Enroll Secret..."
|
||||
salt-call state.apply fleet.event_update-enroll-secret queue=True >> /root/fleet-setup.log
|
||||
salt-call state.apply nginx queue=True >> /root/fleet-setup.log
|
||||
|
||||
# Generate osquery install packages
|
||||
|
||||
@@ -59,6 +59,6 @@ if [[ $? -eq 0 ]]; then
|
||||
echo "Successfully added user to Fleet"
|
||||
else
|
||||
echo "Unable to add user to Fleet; user might already exist"
|
||||
echo $resp
|
||||
echo "$MYSQL_OUTPUT"
|
||||
exit 2
|
||||
fi
|
||||
181
salt/common/tools/sbin/so-image-common
Executable file
@@ -0,0 +1,181 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# NOTE: This script depends on so-common
|
||||
IMAGEREPO=securityonion
|
||||
|
||||
container_list() {
|
||||
MANAGERCHECK=$1
|
||||
|
||||
if [ -z "$MANAGERCHECK" ]; then
|
||||
MANAGERCHECK=so-unknown
|
||||
if [ -f /etc/salt/grains ]; then
|
||||
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ $MANAGERCHECK == 'so-import' ]; then
|
||||
TRUSTED_CONTAINERS=(
|
||||
"so-elasticsearch"
|
||||
"so-filebeat"
|
||||
"so-idstools"
|
||||
"so-kibana"
|
||||
"so-kratos"
|
||||
"so-nginx"
|
||||
"so-pcaptools"
|
||||
"so-soc"
|
||||
"so-steno"
|
||||
"so-suricata"
|
||||
"so-zeek"
|
||||
)
|
||||
elif [ $MANAGERCHECK != 'so-helix' ]; then
|
||||
TRUSTED_CONTAINERS=(
|
||||
"so-acng"
|
||||
"so-curator"
|
||||
"so-domainstats"
|
||||
"so-elastalert"
|
||||
"so-elasticsearch"
|
||||
"so-filebeat"
|
||||
"so-fleet"
|
||||
"so-fleet-launcher"
|
||||
"so-freqserver"
|
||||
"so-grafana"
|
||||
"so-idstools"
|
||||
"so-influxdb"
|
||||
"so-kibana"
|
||||
"so-kratos"
|
||||
"so-logstash"
|
||||
"so-minio"
|
||||
"so-mysql"
|
||||
"so-nginx"
|
||||
"so-pcaptools"
|
||||
"so-playbook"
|
||||
"so-redis"
|
||||
"so-soc"
|
||||
"so-soctopus"
|
||||
"so-steno"
|
||||
"so-strelka-backend"
|
||||
"so-strelka-filestream"
|
||||
"so-strelka-frontend"
|
||||
"so-strelka-manager"
|
||||
"so-suricata"
|
||||
"so-telegraf"
|
||||
"so-thehive"
|
||||
"so-thehive-cortex"
|
||||
"so-thehive-es"
|
||||
"so-wazuh"
|
||||
"so-zeek"
|
||||
)
|
||||
else
|
||||
TRUSTED_CONTAINERS=(
|
||||
"so-filebeat"
|
||||
"so-idstools"
|
||||
"so-elasticsearch"
|
||||
"so-logstash"
|
||||
"so-nginx"
|
||||
"so-redis"
|
||||
"so-steno"
|
||||
"so-suricata"
|
||||
"so-soc"
|
||||
"so-telegraf"
|
||||
"so-zeek"
|
||||
)
|
||||
fi
|
||||
}
|
||||
|
||||
update_docker_containers() {
|
||||
local CURLTYPE=$1
|
||||
local IMAGE_TAG_SUFFIX=$2
|
||||
local PROGRESS_CALLBACK=$3
|
||||
local LOG_FILE=$4
|
||||
|
||||
local CONTAINER_REGISTRY=quay.io
|
||||
local SIGNPATH=/root/sosigs
|
||||
|
||||
if [ -z "$CURLTYPE" ]; then
|
||||
CURLTYPE=unknown
|
||||
fi
|
||||
|
||||
if [ -z "$LOG_FILE" ]; then
|
||||
if [ -c /dev/tty ]; then
|
||||
LOG_FILE=/dev/tty
|
||||
else
|
||||
LOG_FILE=/dev/null
|
||||
fi
|
||||
fi
|
||||
|
||||
# Recheck the version for scenarios where the VERSION wasn't known before this script was imported
|
||||
set_version
|
||||
set_os
|
||||
|
||||
if [ -z "$TRUSTED_CONTAINERS" ]; then
|
||||
container_list
|
||||
fi
|
||||
|
||||
# Let's make sure we have the public key
|
||||
curl -sSL https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS | gpg --import - >> "$LOG_FILE" 2>&1
|
||||
|
||||
rm -rf $SIGNPATH >> "$LOG_FILE" 2>&1
|
||||
mkdir -p $SIGNPATH >> "$LOG_FILE" 2>&1
|
||||
|
||||
# Download the containers from the interwebs
|
||||
for i in "${TRUSTED_CONTAINERS[@]}"
|
||||
do
|
||||
if [ -z "$PROGRESS_CALLBACK" ]; then
|
||||
echo "Downloading $i" >> "$LOG_FILE" 2>&1
|
||||
else
|
||||
$PROGRESS_CALLBACK $i
|
||||
fi
|
||||
|
||||
# Pull down the trusted docker image
|
||||
local image=$i:$VERSION$IMAGE_TAG_SUFFIX
|
||||
docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
|
||||
|
||||
# Get signature
|
||||
curl -A "$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)" https://sigs.securityonion.net/$VERSION/$i:$VERSION$IMAGE_TAG_SUFFIX.sig --output $SIGNPATH/$image.sig >> "$LOG_FILE" 2>&1
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Unable to pull signature file for $image" >> "$LOG_FILE" 2>&1
|
||||
exit 1
|
||||
fi
|
||||
# Dump our hash values
|
||||
DOCKERINSPECT=$(docker inspect $CONTAINER_REGISTRY/$IMAGEREPO/$image)
|
||||
|
||||
echo "$DOCKERINSPECT" | jq ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" > $SIGNPATH/$image.txt
|
||||
echo "$DOCKERINSPECT" | jq ".[0].Created, .[0].RootFS.Layers" >> $SIGNPATH/$image.txt
|
||||
|
||||
if [[ $? -ne 0 ]]; then
|
||||
echo "Unable to inspect $image" >> "$LOG_FILE" 2>&1
|
||||
exit 1
|
||||
fi
|
||||
GPGTEST=$(gpg --verify $SIGNPATH/$image.sig $SIGNPATH/$image.txt 2>&1)
|
||||
if [[ $? -eq 0 ]]; then
|
||||
if [[ -z "$SKIP_TAGPUSH" ]]; then
|
||||
# Tag it with the new registry destination
|
||||
if [ -z "$HOSTNAME" ]; then
|
||||
HOSTNAME=$(hostname)
|
||||
fi
|
||||
docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
|
||||
docker push $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
|
||||
fi
|
||||
else
|
||||
echo "There is a problem downloading the $image image. Details: " >> "$LOG_FILE" 2>&1
|
||||
echo "" >> "$LOG_FILE" 2>&1
|
||||
echo $GPGTEST >> "$LOG_FILE" 2>&1
|
||||
exit 1
|
||||
fi
|
||||
done
|
||||
}
|
||||
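The verification flow above can also be exercised by hand, which is occasionally useful when a pull fails. A minimal sketch, assuming the public key from the project's KEYS file is already imported and using an illustrative image name and version:

# Manual spot-check of one image using the same flow as update_docker_containers
image=so-nginx:2.3.20
docker pull quay.io/securityonion/$image
curl -s https://sigs.securityonion.net/2.3.20/$image.sig --output /tmp/$image.sig
docker inspect quay.io/securityonion/$image | jq '.[0].RepoDigests[] | select(. | contains("quay.io"))' > /tmp/$image.txt
docker inspect quay.io/securityonion/$image | jq '.[0].Created, .[0].RootFS.Layers' >> /tmp/$image.txt
gpg --verify /tmp/$image.sig /tmp/$image.txt && echo "signature OK"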
@@ -27,8 +27,7 @@ function usage {
|
||||
cat << EOF
|
||||
Usage: $0 <pcap-file-1> [pcap-file-2] [pcap-file-N]
|
||||
|
||||
Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and
|
||||
made available for review in the Security Onion toolset.
|
||||
Imports one or more PCAP files onto a sensor node. The PCAP traffic will be analyzed and made available for review in the Security Onion toolset.
|
||||
EOF
|
||||
}
|
||||
|
||||
@@ -218,6 +217,6 @@ https://{{ URLBASE }}/#/hunt?q=import.id:${HASH}%20%7C%20groupby%20event.module%
|
||||
or you can manually set your Time Range to be (in UTC):
|
||||
From: $START_OLDEST To: $END_NEWEST
|
||||
|
||||
Please note that it may take 30 seconds or more for events to appear in Onion Hunt.
|
||||
Please note that it may take 30 seconds or more for events to appear in Hunt.
|
||||
EOF
|
||||
fi
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -X GET -k https://localhost:9200/_cat/indices?v
|
||||
curl -X GET -k -L https://localhost:9200/_cat/indices?v
|
||||
{% else %}
|
||||
curl -X GET localhost:9200/_cat/indices?v
|
||||
curl -X GET -L localhost:9200/_cat/indices?v
|
||||
{% endif %}
|
||||
|
||||
63
salt/common/tools/sbin/so-ip-update
Executable file
@@ -0,0 +1,63 @@
|
||||
#!/bin/bash
|
||||
|
||||
. $(dirname $0)/so-common
|
||||
|
||||
if [ "$FORCE_IP_UPDATE" != "1" ]; then
|
||||
is_single_node_grid || fail "Cannot update the IP on a distributed grid"
|
||||
fi
|
||||
|
||||
echo "This tool will update a manager's IP address to the new IP assigned to the management network interface."
|
||||
|
||||
echo
|
||||
echo "WARNING: This tool is still undergoing testing, use at your own risk!"
|
||||
echo
|
||||
|
||||
if [ -z "$OLD_IP" ]; then
|
||||
OLD_IP=$(lookup_pillar "managerip")
|
||||
|
||||
if [ -z "$OLD_IP" ]; then
|
||||
fail "Unable to find old IP; possible salt system failure"
|
||||
fi
|
||||
|
||||
echo "Found old IP $OLD_IP."
|
||||
fi
|
||||
|
||||
if [ -z "$NEW_IP" ]; then
|
||||
iface=$(lookup_pillar "mainint" "host")
|
||||
NEW_IP=$(ip -4 addr list $iface | grep inet | cut -d' ' -f6 | cut -d/ -f1)
|
||||
|
||||
if [ -z "$NEW_IP" ]; then
|
||||
fail "Unable to detect new IP on interface $iface. "
|
||||
fi
|
||||
|
||||
echo "Detected new IP $NEW_IP on interface $iface."
|
||||
fi
|
||||
|
||||
if [ "$OLD_IP" == "$NEW_IP" ]; then
|
||||
fail "IP address has not changed"
|
||||
fi
|
||||
|
||||
echo "About to change old IP $OLD_IP to new IP $NEW_IP."
|
||||
|
||||
echo
|
||||
read -n 1 -p "Would you like to continue? (y/N) " CONTINUE
|
||||
echo
|
||||
|
||||
if [ "$CONTINUE" == "y" ]; then
|
||||
for file in $(grep -rlI $OLD_IP /opt/so/saltstack /etc); do
|
||||
echo "Updating file: $file"
|
||||
sed -i "s|$OLD_IP|$NEW_IP|g" $file
|
||||
done
|
||||
|
||||
echo "The IP has been changed from $OLD_IP to $NEW_IP."
|
||||
|
||||
echo
|
||||
read -n 1 -p "The system must reboot to ensure all services have restarted with the new configuration. Reboot now? (y/N)" CONTINUE
|
||||
echo
|
||||
|
||||
if [ "$CONTINUE" == "y" ]; then
|
||||
reboot
|
||||
fi
|
||||
else
|
||||
echo "Exiting without changes."
|
||||
fi
|
||||
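Because so-ip-update rewrites every file under /opt/so/saltstack and /etc that contains the old address, a dry run can help confirm the blast radius before answering the prompt. A small sketch, with OLD_IP and NEW_IP as the script derives them:

grep -rlI "$OLD_IP" /opt/so/saltstack /etc                         # files that would be touched
sed "s|$OLD_IP|$NEW_IP|g" /etc/hosts | diff /etc/hosts - || true   # preview the edit on one file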
@@ -23,7 +23,7 @@
|
||||
KIBANA_HOST={{ MANAGER }}
|
||||
KSO_PORT=5601
|
||||
OUTFILE="saved_objects.ndjson"
|
||||
curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
|
||||
curl -s -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -XPOST -L $KIBANA_HOST:$KSO_PORT/api/saved_objects/_export -d '{ "type": [ "index-pattern", "config", "visualization", "dashboard", "search" ], "excludeExportDetails": false }' > $OUTFILE
|
||||
|
||||
# Clean up using PLACEHOLDER
|
||||
sed -i "s/$KIBANA_HOST/PLACEHOLDER/g" $OUTFILE
|
||||
|
||||
18
salt/common/tools/sbin/so-pcap-import
Executable file
@@ -0,0 +1,18 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
$(dirname $0)/so-import-pcap $@
|
||||
26
salt/common/tools/sbin/so-playbook-reset
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
salt-call state.apply playbook.db_init,playbook,playbook.automation_user_create
|
||||
|
||||
/usr/sbin/so-soctopus-restart
|
||||
|
||||
echo "Importing Plays - this will take some time...."
|
||||
sleep 5
|
||||
/usr/sbin/so-playbook-ruleupdate
|
||||
@@ -10,4 +10,4 @@ got_root() {
|
||||
}
|
||||
|
||||
got_root
|
||||
docker exec so-idstools /bin/bash -c 'cd /opt/so/idstools/etc && idstools-rulecat'
|
||||
docker exec so-idstools /bin/bash -c "cd /opt/so/idstools/etc && idstools-rulecat $1"
|
||||
|
||||
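The quoting change above forwards the caller's first argument straight to idstools-rulecat inside the so-idstools container. A usage sketch, assuming this hunk belongs to the rule-update wrapper invoked by so-playbook-reset and that --force is a valid rulecat option (both are assumptions):

so-playbook-ruleupdate --force   # runs: idstools-rulecat --force inside so-idstools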
104
salt/common/tools/sbin/so-salt-minion-check
Executable file
@@ -0,0 +1,104 @@
|
||||
{% import_yaml 'salt/minion.defaults.yaml' as SALT_MINION_DEFAULTS -%}
|
||||
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
# this script checks the time the file /opt/so/log/salt/state-apply-test was last modified and restarts the salt-minion service if it is outside a threshold date/time
|
||||
# the file is modified via file.touch using a scheduled job healthcheck.salt-minion.state-apply-test that runs a state.apply.
|
||||
# by default the file should be updated every 5-8 minutes.
|
||||
# this allows us to test that the minion is able to apply states and communicate with the master
# if the file is unable to be touched via the state.apply, then we assume there is a possibility that the minion is hung (though it could be possible the master is down as well)
# we then stop the service, pkill salt-minion, then start the salt-minion service back up
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
QUIET=false
|
||||
UPTIME_REQ=1800 #in seconds, how long the box has to be up before considering restarting salt-minion due to /opt/so/log/salt/state-apply-test not being touched
|
||||
CURRENT_TIME=$(date +%s)
|
||||
SYSTEM_START_TIME=$(date -d "$(</proc/uptime awk '{print $1}') seconds ago" +%s)
|
||||
LAST_HIGHSTATE_END=$([ -e "/opt/so/log/salt/lasthighstate" ] && date -r /opt/so/log/salt/lasthighstate +%s || echo 0)
|
||||
LAST_HEALTHCHECK_STATE_APPLY=$([ -e "/opt/so/log/salt/state-apply-test" ] && date -r /opt/so/log/salt/state-apply-test +%s || echo 0)
|
||||
# SETTING THRESHOLD TO ANYTHING UNDER 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
|
||||
THRESHOLD={{SALT_MINION_DEFAULTS.salt.minion.check_threshold}} #within how many seconds the file /opt/so/log/salt/state-apply-test must have been touched/modified before the salt minion is restarted
|
||||
THRESHOLD_DATE=$((LAST_HEALTHCHECK_STATE_APPLY+THRESHOLD))
|
||||
|
||||
logCmd() {
|
||||
cmd=$1
|
||||
info "Executing command: $cmd"
|
||||
$cmd >> "/opt/so/log/salt/so-salt-minion-check"
|
||||
}
|
||||
|
||||
log() {
|
||||
msg=$1
|
||||
level=${2:-I}
|
||||
now=$(TZ=GMT date +"%Y-%m-%dT%H:%M:%SZ")
|
||||
if ! $QUIET; then
|
||||
echo $msg
|
||||
fi
|
||||
echo -e "$now | $level | $msg" >> "/opt/so/log/salt/so-salt-minion-check" 2>&1
|
||||
}
|
||||
|
||||
error() {
|
||||
log "$1" "E"
|
||||
}
|
||||
|
||||
info() {
|
||||
log "$1" "I"
|
||||
}
|
||||
|
||||
usage()
|
||||
{
|
||||
cat <<EOF
|
||||
|
||||
Check health of salt-minion and restart it if needed
|
||||
Options:
|
||||
-h This message
|
||||
-q Don't output to terminal
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
while getopts ":q" opt; do
|
||||
case "$opt" in
|
||||
q )
|
||||
QUIET=true
|
||||
;;
|
||||
* ) usage
|
||||
exit 0
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
log "running so-salt-minion-check"
|
||||
|
||||
if [ $CURRENT_TIME -ge $((SYSTEM_START_TIME+$UPTIME_REQ)) ]; then
|
||||
if [ $THRESHOLD_DATE -le $CURRENT_TIME ]; then
|
||||
log "salt-minion is unable to apply states" E
|
||||
log "/opt/so/log/salt/healthcheck-state-apply not touched by required date: `date -d @$THRESHOLD_DATE`, last touched: `date -d @$LAST_HEALTHCHECK_STATE_APPLY`" I
|
||||
log "last highstate completed at `date -d @$LAST_HIGHSTATE_END`" I
|
||||
log "checking if any jobs are running" I
|
||||
logCmd "salt-call --local saltutil.running" I
|
||||
log "killing all salt-minion processes" I
|
||||
logCmd "pkill -9 -ef /usr/bin/salt-minion" I
|
||||
log "starting salt-minion service" I
|
||||
logCmd "systemctl start salt-minion" I
|
||||
else
|
||||
log "/opt/so/log/salt/healthcheck-state-apply last touched: `date -d @$LAST_HEALTHCHECK_STATE_APPLY` must be touched by `date -d @$THRESHOLD_DATE` to avoid salt-minion restart" I
|
||||
fi
|
||||
else
|
||||
log "system uptime only $((CURRENT_TIME-SYSTEM_START_TIME)) seconds does not meet $UPTIME_REQ second requirement." I
|
||||
fi
|
||||
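The restart decision above boils down to two epoch-time comparisons: the box must have been up longer than UPTIME_REQ, and the test file's mtime plus THRESHOLD must already be in the past. A worked sketch with illustrative numbers (THRESHOLD normally comes from salt/minion.defaults.yaml):

THRESHOLD=900
LAST_HEALTHCHECK_STATE_APPLY=$(( $(date +%s) - 1200 ))   # touched 20 minutes ago
THRESHOLD_DATE=$(( LAST_HEALTHCHECK_STATE_APPLY + THRESHOLD ))
CURRENT_TIME=$(date +%s)
if [ "$THRESHOLD_DATE" -le "$CURRENT_TIME" ]; then
  echo "stale by $(( CURRENT_TIME - THRESHOLD_DATE ))s: salt-minion would be restarted"
fi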
93
salt/common/tools/sbin/so-ssh-harden
Executable file
@@ -0,0 +1,93 @@
|
||||
#!/bin/bash
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
if [[ $1 =~ ^(-q|--quiet) ]]; then
|
||||
quiet=true
|
||||
fi
|
||||
|
||||
before=
|
||||
after=
|
||||
reload_required=false
|
||||
|
||||
print_sshd_t() {
|
||||
local string=$1
|
||||
local state=$2
|
||||
echo "${state}:"
|
||||
|
||||
local grep_out
|
||||
grep_out=$(sshd -T | grep "^${string}")
|
||||
|
||||
if [[ $state == "Before" ]]; then
|
||||
before=$grep_out
|
||||
else
|
||||
after=$grep_out
|
||||
fi
|
||||
|
||||
echo $grep_out
|
||||
}
|
||||
|
||||
print_msg() {
|
||||
local msg=$1
|
||||
if ! [[ $quiet ]]; then
|
||||
printf "%s\n" \
|
||||
"----" \
|
||||
"$msg" \
|
||||
"----" \
|
||||
""
|
||||
fi
|
||||
}
|
||||
|
||||
if ! [[ $quiet ]]; then print_sshd_t "ciphers" "Before"; fi
|
||||
sshd -T | grep "^ciphers" | sed -e "s/\(3des-cbc\|aes128-cbc\|aes192-cbc\|aes256-cbc\|arcfour\|arcfour128\|arcfour256\|blowfish-cbc\|cast128-cbc\|rijndael-cbc@lysator.liu.se\)\,\?//g" >> /etc/ssh/sshd_config
|
||||
if ! [[ $quiet ]]; then
|
||||
print_sshd_t "ciphers" "After"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
if [[ $before != $after ]]; then
|
||||
reload_required=true
|
||||
fi
|
||||
|
||||
if ! [[ $quiet ]]; then print_sshd_t "kexalgorithms" "Before"; fi
|
||||
sshd -T | grep "^kexalgorithms" | sed -e "s/\(diffie-hellman-group14-sha1\|ecdh-sha2-nistp256\|diffie-hellman-group-exchange-sha256\|diffie-hellman-group1-sha1\|diffie-hellman-group-exchange-sha1\|ecdh-sha2-nistp521\|ecdh-sha2-nistp384\)\,\?//g" >> /etc/ssh/sshd_config
|
||||
if ! [[ $quiet ]]; then
|
||||
print_sshd_t "kexalgorithms" "After"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
if [[ $before != $after ]]; then
|
||||
reload_required=true
|
||||
fi
|
||||
|
||||
if ! [[ $quiet ]]; then print_sshd_t "macs" "Before"; fi
|
||||
sshd -T | grep "^macs" | sed -e "s/\(hmac-sha2-512,\|umac-128@openssh.com,\|hmac-sha2-256,\|umac-64@openssh.com,\|hmac-sha1,\|hmac-sha1-etm@openssh.com,\|umac-64-etm@openssh.com,\|hmac-sha1\)//g" >> /etc/ssh/sshd_config
|
||||
if ! [[ $quiet ]]; then
|
||||
print_sshd_t "macs" "After"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
if [[ $before != $after ]]; then
|
||||
reload_required=true
|
||||
fi
|
||||
|
||||
if ! [[ $quiet ]]; then print_sshd_t "hostkeyalgorithms" "Before"; fi
|
||||
sshd -T | grep "^hostkeyalgorithms" | sed "s|ecdsa-sha2-nistp256,||g" | sed "s|ssh-rsa,||g" >> /etc/ssh/sshd_config
|
||||
if ! [[ $quiet ]]; then
|
||||
print_sshd_t "hostkeyalgorithms" "After"
|
||||
echo ""
|
||||
fi
|
||||
|
||||
if [[ $before != $after ]]; then
|
||||
reload_required=true
|
||||
fi
|
||||
|
||||
if [[ $reload_required == true ]]; then
|
||||
print_msg "Reloading sshd to load config changes..."
|
||||
systemctl reload sshd
|
||||
fi
|
||||
|
||||
{% if grains['os'] != 'CentOS' %}
|
||||
print_msg "[ WARNING ] Any new ssh sessions will need to remove and reaccept the ECDSA key for this server before reconnecting."
|
||||
{% endif %}
|
||||
|
||||
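so-ssh-harden works by reading the effective configuration from sshd -T, stripping the weak algorithms with sed, and appending the surviving lists to /etc/ssh/sshd_config before reloading. A quick read-only check after the reload (standard OpenSSH, no other assumptions):

sshd -T | grep -E "^(ciphers|kexalgorithms|macs|hostkeyalgorithms)"
# none of the removed entries (e.g. arcfour*, *-cbc, diffie-hellman-group1-sha1, hmac-sha1) should remain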
@@ -14,8 +14,6 @@
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{%- from 'common/maps/so-status.map.jinja' import docker with context %}
|
||||
{%- set container_list = docker['containers'] | sort | unique %}
|
||||
|
||||
if ! [ "$(id -u)" = 0 ]; then
|
||||
echo "This command must be run as root"
|
||||
@@ -23,14 +21,24 @@ if ! [ "$(id -u)" = 0 ]; then
|
||||
fi
|
||||
|
||||
# Constants
|
||||
SYSTEM_START_TIME=$(date -d "$(</proc/uptime awk '{print $1}') seconds ago" +%s)
|
||||
# file populated by salt.lasthighstate state at end of successful highstate run
|
||||
LAST_HIGHSTATE_END=$([ -e "/opt/so/log/salt/lasthighstate" ] && date -r /opt/so/log/salt/lasthighstate +%s || echo 0)
|
||||
HIGHSTATE_RUNNING=$(salt-call --local saltutil.running --out=json | jq -r '.local[].fun' | grep -q 'state.highstate' && echo $?)
|
||||
ERROR_STRING="ERROR"
|
||||
SUCCESS_STRING="OK"
|
||||
PENDING_STRING="PENDING"
|
||||
MISSING_STRING='MISSING'
|
||||
DISABLED_STRING='DISABLED'
|
||||
WAIT_START_STRING='WAIT_START'
|
||||
STARTING_STRING='STARTING'
|
||||
CALLER=$(ps -o comm= $PPID)
|
||||
declare -a BAD_STATUSES=("removing" "paused" "exited" "dead")
|
||||
declare -a PENDING_STATUSES=("paused" "created" "restarting")
|
||||
declare -a GOOD_STATUSES=("running")
|
||||
declare -a DISABLED_CONTAINERS=()
|
||||
mapfile -t DISABLED_CONTAINERS < <(sort -u /opt/so/conf/so-status/so-status.conf | grep "^\s*#" | tr -d "#")
|
||||
|
||||
|
||||
declare -a temp_container_name_list=()
|
||||
declare -a temp_container_state_list=()
|
||||
@@ -72,9 +80,9 @@ compare_lists() {
|
||||
# {% endraw %}
|
||||
|
||||
create_expected_container_list() {
|
||||
{% for item in container_list -%}
|
||||
expected_container_list+=("{{ item }}")
|
||||
{% endfor -%}
|
||||
|
||||
mapfile -t expected_container_list < <(sort -u /opt/so/conf/so-status/so-status.conf | tr -d "#")
|
||||
|
||||
}
|
||||
|
||||
populate_container_lists() {
|
||||
@@ -104,46 +112,67 @@ populate_container_lists() {
|
||||
|
||||
parse_status() {
|
||||
local container_state=${1}
|
||||
|
||||
[[ $container_state = "missing" ]] && printf $MISSING_STRING && return 1
|
||||
local service_name=${2}
|
||||
|
||||
for state in "${GOOD_STATUSES[@]}"; do
|
||||
[[ $container_state = "$state" ]] && printf $SUCCESS_STRING && return 0
|
||||
done
|
||||
|
||||
for state in "${PENDING_STATUSES[@]}"; do
|
||||
[[ $container_state = "$state" ]] && printf $PENDING_STRING && return 0
|
||||
done
|
||||
|
||||
# This is technically not needed since the default is error state
|
||||
for state in "${BAD_STATUSES[@]}"; do
|
||||
[[ $container_state = "$state" ]] && printf $ERROR_STRING && return 1
|
||||
[[ " ${DISABLED_CONTAINERS[@]} " =~ " ${service_name} " ]] && printf $DISABLED_STRING && return 0
|
||||
done
|
||||
|
||||
printf $ERROR_STRING && return 1
|
||||
# if a highstate has finished running since the system has started
|
||||
# then the containers should be running so let's check the status
|
||||
if [ $LAST_HIGHSTATE_END -ge $SYSTEM_START_TIME ]; then
|
||||
|
||||
[[ $container_state = "missing" ]] && printf $MISSING_STRING && return 1
|
||||
|
||||
for state in "${PENDING_STATUSES[@]}"; do
|
||||
[[ $container_state = "$state" ]] && printf $PENDING_STRING && return 0
|
||||
done
|
||||
|
||||
# This is technically not needed since the default is error state
|
||||
for state in "${BAD_STATUSES[@]}"; do
|
||||
[[ $container_state = "$state" ]] && printf $ERROR_STRING && return 1
|
||||
done
|
||||
|
||||
printf $ERROR_STRING && return 1
|
||||
|
||||
# if a highstate has not run since system start time, but a highstate is currently running
|
||||
# then show that the containers are STARTING
|
||||
elif [[ "$HIGHSTATE_RUNNING" == 0 ]]; then
|
||||
printf $STARTING_STRING && return 0
|
||||
|
||||
# if a highstate has not finished running since system startup and isn't currently running
|
||||
# then just show that the containers are WAIT_START; waiting to be started
|
||||
else
|
||||
printf $WAIT_START_STRING && return 1
|
||||
|
||||
fi
|
||||
}
|
||||
|
||||
# {% raw %}
|
||||
|
||||
print_line() {
|
||||
local service_name=${1}
|
||||
local service_state="$( parse_status ${2} )"
|
||||
local service_state="$( parse_status ${2} ${1} )"
|
||||
local columns=$(tput cols)
|
||||
local state_color="\e[0m"
|
||||
|
||||
local PADDING_CONSTANT=14
|
||||
local PADDING_CONSTANT=15
|
||||
|
||||
if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]]; then
|
||||
if [[ $service_state = "$ERROR_STRING" ]] || [[ $service_state = "$MISSING_STRING" ]] || [[ $service_state = "$WAIT_START_STRING" ]]; then
|
||||
state_color="\e[1;31m"
|
||||
elif [[ $service_state = "$SUCCESS_STRING" ]]; then
|
||||
state_color="\e[1;32m"
|
||||
elif [[ $service_state = "$PENDING_STRING" ]]; then
|
||||
elif [[ $service_state = "$PENDING_STRING" ]] || [[ $service_state = "$DISABLED_STRING" ]] || [[ $service_state = "$STARTING_STRING" ]]; then
|
||||
state_color="\e[1;33m"
|
||||
fi
|
||||
|
||||
printf " $service_name "
|
||||
for i in $(seq 0 $(( $columns - $PADDING_CONSTANT - ${#service_name} - ${#service_state} ))); do
|
||||
printf "-"
|
||||
printf "${state_color}%b\e[0m" "-"
|
||||
done
|
||||
printf " [ "
|
||||
printf "${state_color}%b\e[0m" "$service_state"
|
||||
@@ -152,12 +181,10 @@ print_line() {
|
||||
|
||||
non_term_print_line() {
|
||||
local service_name=${1}
|
||||
local service_state="$( parse_status ${2} )"
|
||||
|
||||
local PADDING_CONSTANT=10
|
||||
local service_state="$( parse_status ${2} ${1} )"
|
||||
|
||||
printf " $service_name "
|
||||
for i in $(seq 0 $(( 40 - $PADDING_CONSTANT - ${#service_name} - ${#service_state} ))); do
|
||||
for i in $(seq 0 $(( 35 - ${#service_name} - ${#service_state} ))); do
|
||||
printf "-"
|
||||
done
|
||||
printf " [ "
|
||||
|
||||
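The reworked parse_status gates everything on whether a highstate has completed since boot, which keeps so-status from reporting false errors while the grid is still coming up. A compact sketch of the decision order, summarizing the hunk above:

# highstate finished since boot -> OK / PENDING / ERROR / MISSING (DISABLED when the
#                                  container is commented out in so-status.conf)
# highstate currently running   -> STARTING (yellow)
# neither                       -> WAIT_START (red; nothing has tried to start it yet)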
63
salt/common/tools/sbin/so-suricata-testrule
Normal file
@@ -0,0 +1,63 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{%- set MANAGER = salt['grains.get']('master') %}
|
||||
{%- set VERSION = salt['pillar.get']('global:soversion') %}
|
||||
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
|
||||
TESTRULE=$1
|
||||
TESTPCAP=$2
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
echo ""
|
||||
echo "==============="
|
||||
echo "Running all.rules and $TESTRULE against the following pcap: $TESTPCAP"
|
||||
echo ""
|
||||
sleep 3
|
||||
|
||||
|
||||
rm -rf /tmp/nids-testing/output
|
||||
mkdir -p /tmp/nids-testing/output
|
||||
chown suricata:socore /tmp/nids-testing/output
|
||||
mkdir -p /tmp/nids-testing/rules
|
||||
|
||||
cp /opt/so/conf/suricata/rules/all.rules /tmp/nids-testing/rules/all.rules
|
||||
cat $TESTRULE >> /tmp/nids-testing/rules/all.rules
|
||||
|
||||
echo "==== Begin Suricata Output ==="
|
||||
|
||||
docker run --rm \
|
||||
-v /opt/so/conf/suricata/suricata.yaml:/etc/suricata/suricata.yaml:ro \
|
||||
-v /opt/so/conf/suricata/threshold.conf:/etc/suricata/threshold.conf:ro \
|
||||
-v /tmp/nids-testing/rules:/etc/suricata/rules:ro \
|
||||
-v "$TESTPCAP:/input.pcap:ro" \
|
||||
-v /opt/so/conf/suricata/bpf:/etc/suricata/bpf:ro \
|
||||
-v /tmp/nids-testing/output/:/nsm/:rw \
|
||||
{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-suricata:{{ VERSION }} \
|
||||
--runmode single -v -k none -r /input.pcap -l /tmp --init-errors-fatal
|
||||
echo "==== End Suricata Output ==="
|
||||
|
||||
echo ""
|
||||
echo "If any alerts hit, they will be displayed below:"
|
||||
echo ""
|
||||
|
||||
cat /tmp/nids-testing/output/* | jq
|
||||
|
||||
echo ""
|
||||
echo "End so-suricata-testrule"
|
||||
echo "==============="
|
||||
echo ""
|
||||
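so-suricata-testrule spins up a throwaway so-suricata container against a copy of all.rules plus the rule file under test, so nothing touches the live sensor. Usage sketch (paths are illustrative):

so-suricata-testrule /tmp/test.rules /nsm/import/sample.pcap
# alerts, if any, land under /tmp/nids-testing/output/ and are pretty-printed
# by the final cat | jq step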
@@ -31,7 +31,7 @@ fi
|
||||
USER=$1
|
||||
|
||||
THEHIVE_KEY=$(lookup_pillar hivekey)
|
||||
THEHIVE_IP=$(lookup_pillar managerip)
|
||||
THEHIVE_API_URL="$(lookup_pillar url_base)/thehive/api"
|
||||
THEHIVE_USER=$USER
|
||||
|
||||
# Read password for new user from stdin
|
||||
@@ -47,7 +47,7 @@ if ! check_password "$THEHIVE_PASS"; then
|
||||
fi
|
||||
|
||||
# Create new user in TheHive
|
||||
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
|
||||
resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHIVE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}")
|
||||
if [[ "$resp" =~ \"status\":\"Ok\" ]]; then
|
||||
echo "Successfully added user to TheHive"
|
||||
else
|
||||
|
||||
@@ -31,7 +31,7 @@ fi
|
||||
USER=$1
|
||||
|
||||
THEHIVE_KEY=$(lookup_pillar hivekey)
|
||||
THEHIVE_IP=$(lookup_pillar managerip)
|
||||
THEHIVE_API_URL="$(lookup_pillar url_base)/thehive/api"
|
||||
THEHIVE_USER=$USER
|
||||
|
||||
case "${2^^}" in
|
||||
@@ -46,7 +46,7 @@ case "${2^^}" in
|
||||
;;
|
||||
esac
|
||||
|
||||
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" "https://$THEHIVE_IP/thehive/api/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
|
||||
resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHIVE_API_URL/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }")
|
||||
if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then
|
||||
echo "Successfully updated user in TheHive"
|
||||
else
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
source $(dirname $0)/so-common
|
||||
|
||||
if [[ $# -lt 1 || $# -gt 2 ]]; then
|
||||
echo "Usage: $0 <list|add|update|enable|disable|validate|valemail|valpass> [email]"
|
||||
@@ -56,14 +56,14 @@ function verifyEnvironment() {
|
||||
require "openssl"
|
||||
require "sqlite3"
|
||||
[[ ! -f $databasePath ]] && fail "Unable to find database file; specify path via KRATOS_DB_PATH environment variable"
|
||||
response=$(curl -Ss ${kratosUrl}/)
|
||||
response=$(curl -Ss -L ${kratosUrl}/)
|
||||
[[ "$response" != "404 page not found" ]] && fail "Unable to communicate with Kratos; specify URL via KRATOS_URL environment variable"
|
||||
}
|
||||
|
||||
function findIdByEmail() {
|
||||
email=$1
|
||||
|
||||
response=$(curl -Ss ${kratosUrl}/identities)
|
||||
response=$(curl -Ss -L ${kratosUrl}/identities)
|
||||
identityId=$(echo "${response}" | jq ".[] | select(.verifiable_addresses[0].value == \"$email\") | .id")
|
||||
echo $identityId
|
||||
}
|
||||
@@ -113,7 +113,7 @@ function updatePassword() {
|
||||
}
|
||||
|
||||
function listUsers() {
|
||||
response=$(curl -Ss ${kratosUrl}/identities)
|
||||
response=$(curl -Ss -L ${kratosUrl}/identities)
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
echo "${response}" | jq -r ".[] | .verifiable_addresses[0].value" | sort
|
||||
@@ -131,7 +131,7 @@ function createUser() {
|
||||
EOF
|
||||
)
|
||||
|
||||
response=$(curl -Ss ${kratosUrl}/identities -d "$addUserJson")
|
||||
response=$(curl -Ss -L ${kratosUrl}/identities -d "$addUserJson")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
identityId=$(echo "${response}" | jq ".id")
|
||||
@@ -153,7 +153,7 @@ function updateStatus() {
|
||||
identityId=$(findIdByEmail "$email")
|
||||
[[ ${identityId} == "" ]] && fail "User not found"
|
||||
|
||||
response=$(curl -Ss "${kratosUrl}/identities/$identityId")
|
||||
response=$(curl -Ss -L "${kratosUrl}/identities/$identityId")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
|
||||
oldConfig=$(echo "select config from identity_credentials where identity_id=${identityId};" | sqlite3 "$databasePath")
|
||||
@@ -171,7 +171,7 @@ function updateStatus() {
|
||||
fi
|
||||
|
||||
updatedJson=$(echo "$response" | jq ".traits.status = \"$status\" | del(.verifiable_addresses) | del(.id) | del(.schema_url)")
|
||||
response=$(curl -Ss -XPUT ${kratosUrl}/identities/$identityId -d "$updatedJson")
|
||||
response=$(curl -Ss -XPUT -L ${kratosUrl}/identities/$identityId -d "$updatedJson")
|
||||
[[ $? != 0 ]] && fail "Unable to mark user as locked"
|
||||
|
||||
}
|
||||
@@ -191,7 +191,7 @@ function deleteUser() {
|
||||
identityId=$(findIdByEmail "$email")
|
||||
[[ ${identityId} == "" ]] && fail "User not found"
|
||||
|
||||
response=$(curl -Ss -XDELETE "${kratosUrl}/identities/$identityId")
|
||||
response=$(curl -Ss -XDELETE -L "${kratosUrl}/identities/$identityId")
|
||||
[[ $? != 0 ]] && fail "Unable to communicate with Kratos"
|
||||
}
|
||||
|
||||
|
||||
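All of the so-user subcommands above go through the Kratos admin API, and findIdByEmail simply filters the full identity list client-side with jq. A hedged one-off equivalent; the URL default and the email address are illustrative assumptions (the script takes the URL from the KRATOS_URL environment variable):

curl -Ss -L "${KRATOS_URL:-http://127.0.0.1:4434}/identities" \
  | jq -r '.[] | select(.verifiable_addresses[0].value == "analyst@example.com") | .id'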
17
salt/common/tools/sbin/so-wazuh-user-add
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd /var/ossec/api/configuration/auth/user $1
|
||||
17
salt/common/tools/sbin/so-wazuh-user-passwd
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd /var/ossec/api/configuration/auth/user $1
|
||||
17
salt/common/tools/sbin/so-wazuh-user-remove
Executable file
@@ -0,0 +1,17 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
docker exec -it so-wazuh /usr/bin/node /var/ossec/api/configuration/auth/htpasswd -D /var/ossec/api/configuration/auth/user $1
|
||||
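The three so-wazuh-user-* helpers are thin wrappers around htpasswd inside the so-wazuh container, all operating on the same API credentials file. Usage sketch (the username is illustrative):

so-wazuh-user-add alice        # htpasswd prompts for a password and adds "alice"
so-wazuh-user-passwd alice     # same htpasswd call, updating the existing entry
so-wazuh-user-remove alice     # htpasswd -D removes the entry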
@@ -16,6 +16,8 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
{%- set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
|
||||
|
||||
echo "Starting to check for yara rule updates at $(date)..."
|
||||
|
||||
output_dir="/opt/so/saltstack/default/salt/strelka/rules"
|
||||
mkdir -p $output_dir
|
||||
repos="$output_dir/repos.txt"
|
||||
@@ -27,6 +29,7 @@ updatecounter=0
|
||||
|
||||
{% if ISAIRGAP is sameas true %}
|
||||
|
||||
echo "Airgap mode enabled."
|
||||
|
||||
clone_dir="/nsm/repo/rules/strelka"
|
||||
repo_name="signature-base"
|
||||
@@ -73,17 +76,17 @@ done
|
||||
|
||||
echo "Done!"
|
||||
|
||||
if [ "$newcounter" -gt 0 ];then
|
||||
echo "$newcounter new rules added."
|
||||
fi
|
||||
if [ "$newcounter" -gt 0 ];then
|
||||
echo "$newcounter new rules added."
|
||||
fi
|
||||
|
||||
if [ "$updatecounter" -gt 0 ];then
|
||||
echo "$updatecounter rules updated."
|
||||
fi
|
||||
if [ "$updatecounter" -gt 0 ];then
|
||||
echo "$updatecounter rules updated."
|
||||
fi
|
||||
|
||||
if [ "$deletecounter" -gt 0 ];then
|
||||
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
|
||||
fi
|
||||
if [ "$deletecounter" -gt 0 ];then
|
||||
echo "$deletecounter rules removed because they were deprecated or don't exist in the source repo."
|
||||
fi
|
||||
|
||||
{% else %}
|
||||
|
||||
@@ -162,4 +165,6 @@ else
|
||||
echo "No connectivity to Github...exiting..."
|
||||
exit 1
|
||||
fi
|
||||
{%- endif -%}
|
||||
{% endif %}
|
||||
|
||||
echo "Finished rule updates at $(date)..."
|
||||
|
||||
@@ -16,24 +16,22 @@
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
. /usr/sbin/so-common
|
||||
|
||||
UPDATE_DIR=/tmp/sogh/securityonion
|
||||
INSTALLEDVERSION=$(cat /etc/soversion)
|
||||
INSTALLEDSALTVERSION=$(salt --versions-report | grep Salt: | awk {'print $2'})
|
||||
DEFAULT_SALT_DIR=/opt/so/saltstack/default
|
||||
BATCHSIZE=5
|
||||
SOUP_LOG=/root/soup.log
|
||||
|
||||
exec 3>&1 1>${SOUP_LOG} 2>&1
|
||||
|
||||
manager_check() {
|
||||
# Check to see if this is a manager
|
||||
MANAGERCHECK=$(cat /etc/salt/grains | grep role | awk '{print $2}')
|
||||
if [[ "$MANAGERCHECK" =~ ^('so-eval'|'so-manager'|'so-standalone'|'so-managersearch'|'so-import')$ ]]; then
|
||||
echo "This is a manager. We can proceed."
|
||||
MINIONID=$(salt-call grains.get id --out=txt|awk -F: {'print $2'}|tr -d ' ')
|
||||
else
|
||||
echo "Please run soup on the manager. The manager controls all updates."
|
||||
exit 0
|
||||
fi
|
||||
add_common() {
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
salt-call state.apply common queue=True
|
||||
echo "Run soup one more time"
|
||||
exit 0
|
||||
}
|
||||
|
||||
airgap_mounted() {
|
||||
@@ -47,7 +45,7 @@ airgap_mounted() {
|
||||
echo "If you just copied the .iso file over you can specify the path."
|
||||
echo "If you burned the ISO to a disk the standard way you can specify the device."
|
||||
echo "Example: /home/user/securityonion-2.X.0.iso"
|
||||
echo "Example: /dev/cdrom"
|
||||
echo "Example: /dev/sdx1"
|
||||
echo ""
|
||||
read -p 'Enter the location of the iso: ' ISOLOC
|
||||
if [ -f $ISOLOC ]; then
|
||||
@@ -79,6 +77,30 @@ airgap_mounted() {
|
||||
fi
|
||||
}
|
||||
|
||||
airgap_update_dockers() {
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
# Let's copy the tarball
|
||||
if [ ! -f $AGDOCKER/registry.tar ]; then
|
||||
echo "Unable to locate registry. Exiting"
|
||||
exit 1
|
||||
else
|
||||
echo "Stopping the registry docker"
|
||||
docker stop so-dockerregistry
|
||||
docker rm so-dockerregistry
|
||||
echo "Copying the new dockers over"
|
||||
tar xvf $AGDOCKER/registry.tar -C /nsm/docker-registry/docker
|
||||
echo "Add Registry back"
|
||||
docker load -i $AGDOCKER/registry_image.tar
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
update_registry() {
|
||||
docker stop so-dockerregistry
|
||||
docker rm so-dockerregistry
|
||||
salt-call state.apply registry queue=True
|
||||
}
|
||||
|
||||
check_airgap() {
|
||||
# See if this is an airgap install
|
||||
AIRGAP=$(cat /opt/so/saltstack/local/pillar/global.sls | grep airgap | awk '{print $2}')
|
||||
@@ -92,6 +114,12 @@ check_airgap() {
|
||||
fi
|
||||
}
|
||||
|
||||
check_sudoers() {
|
||||
if grep -q "so-setup" /etc/sudoers; then
|
||||
echo "There is an entry for so-setup in the sudoers file, this can be safely deleted using \"visudo\"."
|
||||
fi
|
||||
}
|
||||
|
||||
clean_dockers() {
|
||||
# Place Holder for cleaning up old docker images
|
||||
echo "Trying to clean up old dockers."
|
||||
@@ -100,7 +128,6 @@ clean_dockers() {
|
||||
}
|
||||
|
||||
clone_to_tmp() {
|
||||
# TODO Need to add a air gap option
|
||||
# Clean old files
|
||||
rm -rf /tmp/sogh
|
||||
# Make a temp location for the files
|
||||
@@ -128,21 +155,17 @@ copy_new_files() {
|
||||
cd /tmp
|
||||
}
|
||||
|
||||
detect_os() {
|
||||
# Detect Base OS
|
||||
echo "Determining Base OS." >> "$SOUP_LOG" 2>&1
|
||||
if [ -f /etc/redhat-release ]; then
|
||||
OS="centos"
|
||||
elif [ -f /etc/os-release ]; then
|
||||
OS="ubuntu"
|
||||
fi
|
||||
echo "Found OS: $OS" >> "$SOUP_LOG" 2>&1
|
||||
generate_and_clean_tarballs() {
|
||||
local new_version
|
||||
new_version=$(cat $UPDATE_DIR/VERSION)
|
||||
[ -d /opt/so/repo ] || mkdir -p /opt/so/repo
|
||||
tar -czf "/opt/so/repo/$new_version.tar.gz" "$UPDATE_DIR"
|
||||
find "/opt/so/repo" -type f -not -name "$new_version.tar.gz" -exec rm -rf {} \;
|
||||
}
|
||||
|
||||
highstate() {
|
||||
# Run a highstate but first cancel a running one.
|
||||
salt-call saltutil.kill_all_jobs
|
||||
salt-call state.highstate -l info
|
||||
# Run a highstate.
|
||||
salt-call state.highstate -l info queue=True
|
||||
}
|
||||
|
||||
masterlock() {
|
||||
@@ -182,7 +205,7 @@ pillar_changes() {
|
||||
[[ "$INSTALLEDVERSION" =~ rc.1 ]] && rc1_to_rc2
|
||||
[[ "$INSTALLEDVERSION" =~ rc.2 ]] && rc2_to_rc3
|
||||
[[ "$INSTALLEDVERSION" =~ rc.3 ]] && rc3_to_2.3.0
|
||||
|
||||
[[ "$INSTALLEDVERSION" == 2.3.0 ]] || [[ "$INSTALLEDVERSION" == 2.3.1 ]] || [[ "$INSTALLEDVERSION" == 2.3.2 ]] || [[ "$INSTALLEDVERSION" == 2.3.10 ]] && 2.3.0_to_2.3.20
|
||||
}
|
||||
|
||||
rc1_to_rc2() {
|
||||
@@ -198,8 +221,8 @@ rc1_to_rc2() {
|
||||
sed -i "/^global:/a \\$line" /opt/so/saltstack/local/pillar/global.sls;
|
||||
|
||||
# Adding play values to the global.sls
|
||||
local HIVEPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
|
||||
local CORTEXPLAYSECRET=$(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)
|
||||
local HIVEPLAYSECRET=$(get_random_value)
|
||||
local CORTEXPLAYSECRET=$(get_random_value)
|
||||
sed -i "/^global:/a \\ hiveplaysecret: $HIVEPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
|
||||
sed -i "/^global:/a \\ cortexplaysecret: $CORTEXPLAYSECRET" /opt/so/saltstack/local/pillar/global.sls;
|
||||
|
||||
@@ -217,7 +240,7 @@ rc1_to_rc2() {
|
||||
while read p; do
|
||||
local NAME=$(echo $p | awk '{print $1}')
|
||||
local EHOSTNAME=$(echo $p | awk -F"_" '{print $1}')
|
||||
local IP=$(echo $p | awk '{print $2}')
|
||||
local IP=$(echo $p | awk '{print $2}')
|
||||
echo "Adding the new cross cluster config for $NAME"
|
||||
curl -XPUT http://localhost:9200/_cluster/settings -H'Content-Type: application/json' -d '{"persistent": {"search": {"remote": {"'$NAME'": {"skip_unavailable": "true", "seeds": ["'$EHOSTNAME':9300"]}}}}}'
|
||||
done </tmp/nodes.txt
|
||||
@@ -261,9 +284,50 @@ rc3_to_2.3.0() {
|
||||
|
||||
sed -i 's/playbook:/playbook_db:/' /opt/so/saltstack/local/pillar/secrets.sls
|
||||
{
|
||||
echo "playbook_admin: $(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)"
|
||||
echo "playbook_automation: $(tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1)"
|
||||
echo "playbook_admin: $(get_random_value)"
|
||||
echo "playbook_automation: $(get_random_value)"
|
||||
} >> /opt/so/saltstack/local/pillar/secrets.sls
|
||||
|
||||
INSTALLEDVERSION=2.3.0
|
||||
}
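The playbook secrets above use a get_random_value helper in place of the inline tr pipeline shown on the removed lines. A sketch of what such a helper presumably looks like, reconstructed from that pipeline (an assumption, since the helper itself lives in so-common and is not shown here):

# Hypothetical reconstruction of get_random_value, based on the pipeline it replaces.
get_random_value() {
    tr -dc 'a-zA-Z0-9' < /dev/urandom | fold -w 20 | head -n 1
}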
|
||||
|
||||
2.3.0_to_2.3.20(){
|
||||
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
|
||||
# Remove PCAP from global
|
||||
sed -i '/pcap:/d' /opt/so/saltstack/local/pillar/global.sls
|
||||
sed -i '/sensor_checkin_interval_ms:/d' /opt/so/saltstack/local/pillar/global.sls
|
||||
|
||||
# Add check-in interval to global
|
||||
echo "sensoroni:" >> /opt/so/saltstack/local/pillar/global.sls
|
||||
echo " node_checkin_interval_ms: 10000" >> /opt/so/saltstack/local/pillar/global.sls
|
||||
|
||||
# Update pillar files for new sensoroni functionality
|
||||
for file in /opt/so/saltstack/local/pillar/minions/*; do
|
||||
echo "sensoroni:" >> $file
|
||||
echo " node_description:" >> $file
|
||||
local SOMEADDRESS=$(cat $file | grep mainip | tail -n 1 | awk '{print $2}')
|
||||
echo " node_address: $SOMEADDRESS" >> $file
|
||||
done
|
||||
|
||||
# Remove old firewall config to reduce confusion
|
||||
rm -f /opt/so/saltstack/default/pillar/firewall/ports.sls
|
||||
|
||||
# Fix daemon.json by managing it
|
||||
echo "docker:" >> /opt/so/saltstack/local/pillar/global.sls
|
||||
DOCKERGREP=$(cat /etc/docker/daemon.json | grep base | awk {'print $3'} | cut -f1 -d"," | tr -d '"')
|
||||
if [ -z "$DOCKERGREP" ]; then
|
||||
echo " range: '172.17.0.0/24'" >> /opt/so/saltstack/local/pillar/global.sls
|
||||
echo " bip: '172.17.0.1/24'" >> /opt/so/saltstack/local/pillar/global.sls
|
||||
else
|
||||
DOCKERSTUFF="${DOCKERGREP//\"}"
|
||||
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
|
||||
echo " range: '$DOCKERSTUFF/24'" >> /opt/so/saltstack/local/pillar/global.sls
|
||||
echo " bip: '$DOCKERSTUFFBIP'" >> /opt/so/saltstack/local/pillar/global.sls
|
||||
|
||||
fi
|
||||
|
||||
INSTALLEDVERSION=2.3.20
|
||||
|
||||
}
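The bip value above is derived by having awk reprint the first three octets of the daemon.json base address with a trailing 1. A quick illustration with a hypothetical base value:

# Hypothetical input: daemon.json contains "base": "172.18.0.0/16"
DOCKERSTUFF="172.18.0.0"
DOCKERSTUFFBIP=$(echo $DOCKERSTUFF | awk -F'.' '{print $1,$2,$3,1}' OFS='.')/24
echo "$DOCKERSTUFFBIP"   # prints 172.18.0.1/24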
|
||||
|
||||
space_check() {
|
||||
@@ -278,118 +342,43 @@ space_check() {
|
||||
|
||||
}
|
||||
|
||||
thehive_maint() {
|
||||
echo -n "Waiting for TheHive..."
|
||||
COUNT=0
|
||||
THEHIVE_CONNECTED="no"
|
||||
while [[ "$COUNT" -le 240 ]]; do
|
||||
curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert"
|
||||
if [ $? -eq 0 ]; then
|
||||
THEHIVE_CONNECTED="yes"
|
||||
echo "connected!"
|
||||
break
|
||||
else
|
||||
((COUNT+=1))
|
||||
sleep 1
|
||||
echo -n "."
|
||||
fi
|
||||
done
|
||||
if [ "$THEHIVE_CONNECTED" == "yes" ]; then
|
||||
echo "Migrating thehive databases if needed."
|
||||
curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate"
|
||||
curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate"
|
||||
fi
|
||||
}
|
||||
|
||||
unmount_update() {
|
||||
cd /tmp
|
||||
umount /tmp/soagupdate
|
||||
}
|
||||
|
||||
|
||||
update_centos_repo() {
|
||||
# Update the files in the repo
|
||||
echo "Syncing new updates to /nsm/repo"
|
||||
rsync -a $AGDOCKER/repo /nsm/repo
|
||||
rsync -av $AGREPO/* /nsm/repo/
|
||||
echo "Creating repo"
|
||||
createrepo /nsm/repo
|
||||
}
|
||||
|
||||
update_dockers() {
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
# Let's copy the tarball
|
||||
if [ ! -f $AGDOCKER/registry.tar ]; then
|
||||
echo "Unable to locate registry. Exiting"
|
||||
exit 0
|
||||
else
|
||||
echo "Stopping the registry docker"
|
||||
docker stop so-dockerregistry
|
||||
docker rm so-dockerregistry
|
||||
echo "Copying the new dockers over"
|
||||
tar xvf $AGDOCKER/registry.tar -C /nsm/docker-registry/docker
|
||||
fi
|
||||
else
|
||||
# List all the containers
|
||||
if [ $MANAGERCHECK == 'so-import' ]; then
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-idstools" \
|
||||
"so-nginx" \
|
||||
"so-filebeat" \
|
||||
"so-suricata" \
|
||||
"so-soc" \
|
||||
"so-elasticsearch" \
|
||||
"so-kibana" \
|
||||
"so-kratos" \
|
||||
"so-suricata" \
|
||||
"so-registry" \
|
||||
"so-pcaptools" \
|
||||
"so-zeek" )
|
||||
elif [ $MANAGERCHECK != 'so-helix' ]; then
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-acng" \
|
||||
"so-thehive-cortex" \
|
||||
"so-curator" \
|
||||
"so-domainstats" \
|
||||
"so-elastalert" \
|
||||
"so-elasticsearch" \
|
||||
"so-filebeat" \
|
||||
"so-fleet" \
|
||||
"so-fleet-launcher" \
|
||||
"so-freqserver" \
|
||||
"so-grafana" \
|
||||
"so-idstools" \
|
||||
"so-influxdb" \
|
||||
"so-kibana" \
|
||||
"so-kratos" \
|
||||
"so-logstash" \
|
||||
"so-minio" \
|
||||
"so-mysql" \
|
||||
"so-nginx" \
|
||||
"so-pcaptools" \
|
||||
"so-playbook" \
|
||||
"so-redis" \
|
||||
"so-soc" \
|
||||
"so-soctopus" \
|
||||
"so-steno" \
|
||||
"so-strelka-frontend" \
|
||||
"so-strelka-manager" \
|
||||
"so-strelka-backend" \
|
||||
"so-strelka-filestream" \
|
||||
"so-suricata" \
|
||||
"so-telegraf" \
|
||||
"so-thehive" \
|
||||
"so-thehive-es" \
|
||||
"so-wazuh" \
|
||||
"so-zeek" )
|
||||
else
|
||||
TRUSTED_CONTAINERS=( \
|
||||
"so-filebeat" \
|
||||
"so-idstools" \
|
||||
"so-logstash" \
|
||||
"so-nginx" \
|
||||
"so-redis" \
|
||||
"so-steno" \
|
||||
"so-suricata" \
|
||||
"so-telegraf" \
|
||||
"so-zeek" )
|
||||
fi
|
||||
|
||||
# Download the containers from the interwebs
|
||||
for i in "${TRUSTED_CONTAINERS[@]}"
|
||||
do
|
||||
# Pull down the trusted docker image
|
||||
echo "Downloading $i:$NEWVERSION"
|
||||
docker pull --disable-content-trust=false docker.io/$IMAGEREPO/$i:$NEWVERSION
|
||||
# Tag it with the new registry destination
|
||||
docker tag $IMAGEREPO/$i:$NEWVERSION $HOSTNAME:5000/$IMAGEREPO/$i:$NEWVERSION
|
||||
docker push $HOSTNAME:5000/$IMAGEREPO/$i:$NEWVERSION
|
||||
done
|
||||
fi
|
||||
# Cleanup on Aisle 4
|
||||
clean_dockers
|
||||
echo "Add Registry back if airgap"
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
docker load -i $AGDOCKER/registry_image.tar
|
||||
fi
|
||||
|
||||
}
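For the non-airgap path, update_dockers above follows a pull, retag, push sequence into the local registry for every trusted container. Distilled for a single image, with hypothetical values for the repo, version, and image name:

# Illustration only; IMAGEREPO, NEWVERSION, and the image name are hypothetical here.
IMAGEREPO=securityonion; NEWVERSION=2.3.20; i=so-nginx
docker pull docker.io/$IMAGEREPO/$i:$NEWVERSION
docker tag $IMAGEREPO/$i:$NEWVERSION $HOSTNAME:5000/$IMAGEREPO/$i:$NEWVERSION
docker push $HOSTNAME:5000/$IMAGEREPO/$i:$NEWVERSION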
|
||||
|
||||
update_version() {
|
||||
# Update the version to the latest
|
||||
echo "Updating the Security Onion version file."
|
||||
@@ -411,6 +400,10 @@ upgrade_check_salt() {
|
||||
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
|
||||
echo "You are already running the correct version of Salt for Security Onion."
|
||||
else
|
||||
UPGRADESALT=1
|
||||
fi
|
||||
}
|
||||
upgrade_salt() {
|
||||
SALTUPGRADED=True
|
||||
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
|
||||
echo ""
|
||||
@@ -421,7 +414,11 @@ upgrade_check_salt() {
|
||||
yum versionlock delete "salt-*"
|
||||
echo "Updating Salt packages and restarting services."
|
||||
echo ""
|
||||
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -r -F -M -x python3 stable "$NEWSALTVERSION"
|
||||
else
|
||||
sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -F -M -x python3 stable "$NEWSALTVERSION"
|
||||
fi
|
||||
echo "Applying yum versionlock for Salt."
|
||||
echo ""
|
||||
yum versionlock add "salt-*"
|
||||
@@ -441,18 +438,24 @@ upgrade_check_salt() {
|
||||
apt-mark hold "salt-master"
|
||||
apt-mark hold "salt-minion"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
verify_latest_update_script() {
|
||||
# Check to see if the update scripts match. If not run the new one.
|
||||
CURRENTSOUP=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/soup | awk '{print $1}')
|
||||
GITSOUP=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/soup | awk '{print $1}')
|
||||
if [[ "$CURRENTSOUP" == "$GITSOUP" ]]; then
|
||||
CURRENTCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-common | awk '{print $1}')
|
||||
GITCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-common | awk '{print $1}')
|
||||
CURRENTIMGCMN=$(md5sum /opt/so/saltstack/default/salt/common/tools/sbin/so-image-common | awk '{print $1}')
|
||||
GITIMGCMN=$(md5sum $UPDATE_DIR/salt/common/tools/sbin/so-image-common | awk '{print $1}')
|
||||
|
||||
if [[ "$CURRENTSOUP" == "$GITSOUP" && "$CURRENTCMN" == "$GITCMN" && "$CURRENTIMGCMN" == "$GITIMGCMN" ]]; then
|
||||
echo "This version of the soup script is up to date. Proceeding."
|
||||
else
|
||||
echo "You are not running the latest soup version. Updating soup."
|
||||
echo "You are not running the latest soup version. Updating soup and its components. Might take multiple runs to complete"
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/soup $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/so-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
cp $UPDATE_DIR/salt/common/tools/sbin/so-image-common $DEFAULT_SALT_DIR/salt/common/tools/sbin/
|
||||
salt-call state.apply common queue=True
|
||||
echo ""
|
||||
echo "soup has been updated. Please run soup again."
|
||||
@@ -478,53 +481,106 @@ done
|
||||
|
||||
echo "Checking to see if this is a manager."
|
||||
echo ""
|
||||
manager_check
|
||||
require_manager
|
||||
set_minionid
|
||||
echo "Checking to see if this is an airgap install"
|
||||
echo ""
|
||||
check_airgap
|
||||
echo "Found that Security Onion $INSTALLEDVERSION is currently installed."
|
||||
echo ""
|
||||
detect_os
|
||||
set_os
|
||||
echo ""
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
# Let's mount the ISO since this is airgap
|
||||
airgap_mounted
|
||||
else
|
||||
echo "Cloning Security Onion github repo into $UPDATE_DIR."
|
||||
echo "Removing previous upgrade sources."
|
||||
rm -rf $UPDATE_DIR
|
||||
clone_to_tmp
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Verifying we have the latest soup script."
|
||||
verify_latest_update_script
|
||||
echo ""
|
||||
|
||||
echo "Generating new repo archive"
|
||||
generate_and_clean_tarballs
|
||||
if [ -f /usr/sbin/so-image-common ]; then
|
||||
. /usr/sbin/so-image-common
|
||||
else
|
||||
add_common
|
||||
fi
|
||||
|
||||
echo "Let's see if we need to update Security Onion."
|
||||
upgrade_check
|
||||
space_check
|
||||
|
||||
echo "Checking for Salt Master and Minion updates."
|
||||
upgrade_check_salt
|
||||
|
||||
echo ""
|
||||
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
|
||||
echo ""
|
||||
echo "Updating dockers to $NEWVERSION."
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
airgap_update_dockers
|
||||
else
|
||||
update_registry
|
||||
update_docker_containers "soup"
|
||||
FEATURESCHECK=$(lookup_pillar features elastic)
|
||||
if [[ "$FEATURESCHECK" == "True" ]]; then
|
||||
TRUSTED_CONTAINERS=(
|
||||
"so-elasticsearch"
|
||||
"so-filebeat"
|
||||
"so-kibana"
|
||||
"so-logstash"
|
||||
)
|
||||
update_docker_containers "features" "-features"
|
||||
fi
|
||||
fi
|
||||
echo ""
|
||||
echo "Stopping Salt Minion service."
|
||||
systemctl stop salt-minion
|
||||
echo "Killing any remaining Salt Minion processes."
|
||||
pkill -9 -ef /usr/bin/salt-minion
|
||||
echo ""
|
||||
echo "Stopping Salt Master service."
|
||||
systemctl stop salt-master
|
||||
echo ""
|
||||
echo "Checking for Salt Master and Minion updates."
|
||||
upgrade_check_salt
|
||||
|
||||
# Does Salt need to be upgraded? If so, update it.
|
||||
if [ "$UPGRADESALT" == "1" ]; then
|
||||
echo "Upgrading Salt"
|
||||
# Update the repo files so it can actually upgrade
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
update_centos_repo
|
||||
yum clean all
|
||||
fi
|
||||
upgrade_salt
|
||||
fi
|
||||
|
||||
echo "Checking if Salt was upgraded."
|
||||
echo ""
|
||||
# Check that Salt was upgraded
|
||||
if [[ $(salt --versions-report | grep Salt: | awk {'print $2'}) != "$NEWSALTVERSION" ]]; then
|
||||
echo "Salt upgrade failed. Check of indicators of failure in $SOUP_LOG."
|
||||
echo "Once the issue is resolved, run soup again."
|
||||
echo "Exiting."
|
||||
echo ""
|
||||
exit 1
|
||||
else
|
||||
echo "Salt upgrade success."
|
||||
echo ""
|
||||
fi
|
||||
|
||||
echo "Making pillar changes."
|
||||
pillar_changes
|
||||
echo ""
|
||||
|
||||
echo ""
|
||||
echo "Updating dockers to $NEWVERSION."
|
||||
update_dockers
|
||||
|
||||
# Only update the repo if it's airgap
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
if [[ $is_airgap -eq 0 ]] && [[ "$UPGRADESALT" != "1" ]]; then
|
||||
update_centos_repo
|
||||
fi
|
||||
|
||||
@@ -542,9 +598,19 @@ echo ""
|
||||
echo "Starting Salt Master service."
|
||||
systemctl start salt-master
|
||||
|
||||
# Only regenerate osquery packages if Fleet is enabled
|
||||
FLEET_MANAGER=$(lookup_pillar fleet_manager)
|
||||
FLEET_NODE=$(lookup_pillar fleet_node)
|
||||
if [[ "$FLEET_MANAGER" == "True" || "$FLEET_NODE" == "True" ]]; then
|
||||
echo ""
|
||||
echo "Regenerating Osquery Packages.... This will take several minutes."
|
||||
salt-call state.apply fleet.event_gen-packages -l info queue=True
|
||||
echo ""
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
|
||||
highstate
|
||||
salt-call state.highstate -l info queue=True
|
||||
echo ""
|
||||
echo "Upgrade from $INSTALLEDVERSION to $NEWVERSION complete."
|
||||
|
||||
@@ -557,18 +623,24 @@ masterunlock
|
||||
echo ""
|
||||
echo "Starting Salt Master service."
|
||||
systemctl start salt-master
|
||||
highstate
|
||||
echo "Running a highstate. This could take several minutes."
|
||||
salt-call state.highstate -l info queue=True
|
||||
playbook
|
||||
unmount_update
|
||||
thehive_maint
|
||||
|
||||
SALTUPGRADED="True"
|
||||
if [[ "$SALTUPGRADED" == "True" ]]; then
|
||||
if [ "$UPGRADESALT" == "1" ]; then
|
||||
echo ""
|
||||
echo "Upgrading Salt on the remaining Security Onion nodes from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
|
||||
salt -C 'not *_eval and not *_helix and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion
|
||||
if [ $is_airgap -eq 0 ]; then
|
||||
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone' cmd.run "yum clean all"
|
||||
fi
|
||||
salt -C 'not *_eval and not *_helixsensor and not *_manager and not *_managersearch and not *_standalone' -b $BATCHSIZE state.apply salt.minion queue=True
|
||||
echo ""
|
||||
fi
|
||||
|
||||
check_sudoers
|
||||
|
||||
}
|
||||
|
||||
main "$@" | tee /dev/fd/3
|
||||
|
||||
@@ -1,2 +1,27 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
APP=close
|
||||
lf=/tmp/$APP-pidLockFile
|
||||
# create empty lock file if none exists
|
||||
cat /dev/null >> $lf
|
||||
read lastPID < $lf
|
||||
# if lastPID is not null and a process with that pid exists, exit
|
||||
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
|
||||
echo $$ > $lf
|
||||
|
||||
/usr/sbin/so-curator-closed-delete > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-zeek-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-beats-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-firewall-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ids-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-import-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-osquery-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-ossec-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-strelka-close.yml > /dev/null 2>&1; docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/so-syslog-close.yml > /dev/null 2>&1
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018 Security Onion Solutions, LLC
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
@@ -34,6 +34,13 @@
|
||||
#fi
|
||||
|
||||
# Avoid starting multiple instances
|
||||
if ! pgrep -f "so-curator-closed-delete-delete" >/dev/null; then
|
||||
/usr/sbin/so-curator-closed-delete-delete
|
||||
fi
|
||||
APP=closeddelete
|
||||
lf=/tmp/$APP-pidLockFile
|
||||
# create empty lock file if none exists
|
||||
cat /dev/null >> $lf
|
||||
read lastPID < $lf
|
||||
# if lastPID is not null and a process with that pid exists , exit
|
||||
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
|
||||
echo $$ > $lf
|
||||
|
||||
/usr/sbin/so-curator-closed-delete-delete
|
||||
|
||||
@@ -26,41 +26,36 @@
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
#. /usr/sbin/so-elastic-common
|
||||
#. /etc/nsm/securityonion.conf
|
||||
|
||||
LOG="/opt/so/log/curator/so-curator-closed-delete.log"
|
||||
|
||||
overlimit() {
|
||||
|
||||
[[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]]
|
||||
}
|
||||
|
||||
closedindices() {
|
||||
|
||||
INDICES=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed 2> /dev/null)
|
||||
[ $? -eq 1 ] && return false
|
||||
echo ${INDICES} | grep -q -E "(logstash-|so-)"
|
||||
}
|
||||
|
||||
# Check for 2 conditions:
|
||||
# 1. Are Elasticsearch indices using more disk space than LOG_SIZE_LIMIT?
|
||||
# 2. Are there any closed logstash- or so- indices that we can delete?
|
||||
# 2. Are there any closed indices that we can delete?
|
||||
# If both conditions are true, keep on looping until one of the conditions is false.
|
||||
while [[ $(du -hs --block-size=1GB /nsm/elasticsearch/nodes | awk '{print $1}' ) -gt "{{LOG_SIZE_LIMIT}}" ]] &&
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do
|
||||
{% else %}
|
||||
curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" > /dev/null; do
|
||||
{% endif %}
|
||||
while overlimit && closedindices; do
|
||||
|
||||
# We need to determine OLDEST_INDEX.
|
||||
# First, get the list of closed indices that are prefixed with "logstash-" or "so-".
|
||||
# For example: logstash-ids-YYYY.MM.DD
|
||||
# We need to determine OLDEST_INDEX:
|
||||
# First, get the list of closed indices using _cat/indices?h=index\&expand_wildcards=closed.
|
||||
# Then, sort by date by telling sort to use hyphen as delimiter and then sort on the third field.
|
||||
# Finally, select the first entry in that sorted list.
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
OLDEST_INDEX=$(curl -s -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
|
||||
{% else %}
|
||||
OLDEST_INDEX=$(curl -s {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices | grep -E " close (logstash-|so-)" | awk '{print $2}' | sort -t- -k3 | head -1)
|
||||
{% endif %}
|
||||
OLDEST_INDEX=$(curl -s -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/_cat/indices?h=index\&expand_wildcards=closed | grep -E "(logstash-|so-)" | sort -t- -k3 | head -1)
|
||||
|
||||
# Now that we've determined OLDEST_INDEX, ask Elasticsearch to delete it.
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl -XDELETE -k https://{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
|
||||
{% else %}
|
||||
curl -XDELETE {{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
|
||||
{% endif %}
|
||||
curl -XDELETE -k {% if grains['role'] in ['so-node','so-heavynode'] %}https://{% endif %}{{ELASTICSEARCH_HOST}}:{{ELASTICSEARCH_PORT}}/${OLDEST_INDEX}
|
||||
|
||||
# Finally, write a log entry that says we deleted it.
|
||||
echo "$(date) - Used disk space exceeds LOG_SIZE_LIMIT ({{LOG_SIZE_LIMIT}} GB) - Index ${OLDEST_INDEX} deleted ..." >> ${LOG}
|
||||
|
||||
done
|
||||
done
|
||||
@@ -1,2 +1,27 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2014,2015,2016,2017,2018,2019,2020 Security Onion Solutions, LLC
|
||||
#
|
||||
# This program is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# This program is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
APP=delete
|
||||
lf=/tmp/$APP-pidLockFile
|
||||
# create empty lock file if none exists
|
||||
cat /dev/null >> $lf
|
||||
read lastPID < $lf
|
||||
# if lastPID is not null and a process with that pid exists, exit
|
||||
[ ! -z "$lastPID" -a -d /proc/$lastPID ] && exit
|
||||
echo $$ > $lf
|
||||
|
||||
docker exec so-curator curator --config /etc/curator/config/curator.yml /etc/curator/action/delete.yml > /dev/null 2>&1
|
||||
|
||||
@@ -127,6 +127,12 @@ so-curator:
|
||||
- /opt/so/conf/curator/curator.yml:/etc/curator/config/curator.yml:ro
|
||||
- /opt/so/conf/curator/action/:/etc/curator/action:ro
|
||||
- /opt/so/log/curator:/var/log/curator:rw
|
||||
|
||||
append_so-curator_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-curator
|
||||
|
||||
# Begin Curator Cron Jobs
|
||||
|
||||
# Close
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3']%}
|
||||
{% set OLDVERSIONS = ['2.0.0-rc.1','2.0.1-rc.1','2.0.2-rc.1','2.0.3-rc.1','2.1.0-rc.2','2.2.0-rc.3','2.3.0','2.3.1','2.3.2','2.3.20']%}
|
||||
|
||||
{% for VERSION in OLDVERSIONS %}
|
||||
remove_images_{{ VERSION }}:
|
||||
@@ -42,4 +42,4 @@ remove_images_{{ VERSION }}:
|
||||
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }}'
|
||||
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-wazuh:{{ VERSION }}'
|
||||
- '{{ MANAGER }}:5000/{{ IMAGEREPO }}/so-zeek:{{ VERSION }}'
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
|
||||
@@ -43,19 +43,24 @@ dstatslogdir:
|
||||
|
||||
so-domainstatsimage:
|
||||
cmd.run:
|
||||
- name: docker pull --disable-content-trust=false docker.io/{{ IMAGEREPO }}/so-domainstats:HH1.0.3
|
||||
- name: docker pull {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }}
|
||||
|
||||
so-domainstats:
|
||||
docker_container.running:
|
||||
- require:
|
||||
- so-domainstatsimage
|
||||
- image: docker.io/{{ IMAGEREPO }}/so-domainstats:HH1.0.3
|
||||
- image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-domainstats:{{ VERSION }}
|
||||
- hostname: domainstats
|
||||
- name: so-domainstats
|
||||
- user: domainstats
|
||||
- binds:
|
||||
- /opt/so/log/domainstats:/var/log/domain_stats
|
||||
|
||||
append_so-domainstats_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-domainstats
|
||||
|
||||
{% else %}
|
||||
|
||||
domainstats_state_not_allowed:
|
||||
|
||||
@@ -16,7 +16,7 @@ class PlaybookESAlerter(Alerter):
|
||||
today = strftime("%Y.%m.%d", gmtime())
|
||||
timestamp = strftime("%Y-%m-%d"'T'"%H:%M:%S", gmtime())
|
||||
headers = {"Content-Type": "application/json"}
|
||||
payload = {"rule": { "name": self.rule['play_title'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
|
||||
payload = {"rule": { "name": self.rule['play_title'],"case_template": self.rule['play_id'],"uuid": self.rule['play_id'],"category": self.rule['rule.category']},"event":{ "severity": self.rule['event.severity'],"module": self.rule['event.module'],"dataset": self.rule['event.dataset'],"severity_label": self.rule['sigma_level']},"kibana_pivot": self.rule['kibana_pivot'],"soc_pivot": self.rule['soc_pivot'],"play_url": self.rule['play_url'],"sigma_level": self.rule['sigma_level'],"event_data": match, "@timestamp": timestamp}
|
||||
url = f"http://{self.rule['elasticsearch_host']}/so-playbook-alerts-{today}/_doc/"
|
||||
requests.post(url, data=json.dumps(payload), headers=headers, verify=False)
|
||||
|
||||
|
||||
@@ -121,6 +121,12 @@ so-elastalert:
|
||||
- {{MANAGER_URL}}:{{MANAGER_IP}}
|
||||
- require:
|
||||
- module: wait_for_elasticsearch
|
||||
|
||||
append_so-elastalert_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-elastalert
|
||||
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
@@ -1,18 +1,19 @@
|
||||
{%- set NODE_ROUTE_TYPE = salt['pillar.get']('elasticsearch:node_route_type', 'hot') %}
|
||||
{%- if salt['pillar.get']('elasticsearch:hot_warm_enabled') or salt['pillar.get']('elasticsearch:true_cluster') %}
|
||||
{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:true_cluster_name', '') %}
|
||||
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip') %}
|
||||
{%- set FEATURES = salt['pillar.get']('elastic:features', False) %}
|
||||
{%- set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
|
||||
{%- if TRUECLUSTER is sameas true %}
|
||||
{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:true_cluster_name') %}
|
||||
{%- else %}
|
||||
{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername', '') %}
|
||||
{%- set ESCLUSTERNAME = salt['pillar.get']('elasticsearch:esclustername') %}
|
||||
{%- endif %}
|
||||
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
|
||||
cluster.name: "{{ ESCLUSTERNAME }}"
|
||||
network.host: 0.0.0.0
|
||||
|
||||
# minimum_master_nodes need to be explicitly set when bound on a public IP
|
||||
# set to 1 to allow single node clusters
|
||||
# Details: https://github.com/elastic/elasticsearch/pull/17288
|
||||
discovery.zen.minimum_master_nodes: 1
|
||||
#discovery.zen.minimum_master_nodes: 1
|
||||
# This is a test -- if this is here, then the volume is mounted correctly.
|
||||
path.logs: /var/log/elasticsearch
|
||||
action.destructive_requires_name: true
|
||||
@@ -37,10 +38,30 @@ cluster.routing.allocation.disk.watermark.flood_stage: 98%
|
||||
#xpack.security.http.ssl.client_authentication: none
|
||||
#xpack.security.authc:
|
||||
# anonymous:
|
||||
# username: anonymous_user
|
||||
# roles: superuser
|
||||
# authz_exception: true
|
||||
# username: anonymous_user
|
||||
# roles: superuser
|
||||
# authz_exception: true
|
||||
{%- endif %}
|
||||
node.attr.box_type: {{ NODE_ROUTE_TYPE }}
|
||||
node.name: {{ ESCLUSTERNAME }}
|
||||
node.name: {{ grains.host }}
|
||||
script.max_compilations_rate: 1000/1m
|
||||
{%- if TRUECLUSTER is sameas true %}
|
||||
{%- if grains.role == 'so-manager' %}
|
||||
{%- if salt['pillar.get']('nodestab', {}) %}
|
||||
node.roles: [ master, data, remote_cluster_client ]
|
||||
discovery.seed_hosts:
|
||||
- {{ grains.master }}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
|
||||
- {{ SN.split('_')|first }}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
{%- else %}
|
||||
node.roles: [ data, ingest ]
|
||||
node.attr.box_type: {{ NODE_ROUTE_TYPE }}
|
||||
discovery.seed_hosts:
|
||||
- {{ grains.master }}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{%- if TRUECLUSTER is sameas false %}
|
||||
node.attr.box_type: {{ NODE_ROUTE_TYPE }}
|
||||
{%- endif %}
|
||||
indices.query.bool.max_clause_count: 1500
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{ "set": { "if": "ctx.rule?.uuid > 1999999", "field": "rule.reference", "value": "https://doc.emergingthreats.net/{{rule.uuid}}" } },
|
||||
{ "convert": { "if": "ctx.rule.uuid != null", "field": "rule.uuid", "type": "string" } },
|
||||
{ "dissect": { "if": "ctx.rule.name != null", "field": "rule.name", "pattern" : "%{rule_type} %{rest_of_rulename} ", "ignore_failure": true } },
|
||||
{ "set": { "if": "ctx.rule_type == 'GPL'", "field": "rule_ruleset", "value": "Snort GPL" } },
|
||||
{ "set": { "if": "ctx.rule_type == 'GPL'", "field": "rule.ruleset", "value": "Snort GPL" } },
|
||||
{ "set": { "if": "ctx.rule_type == 'ET'", "field": "rule.ruleset", "value": "Emerging Threats" } },
|
||||
{ "set": { "if": "ctx.rule.severity == 3", "field": "event.severity", "value": 1, "override": true } },
|
||||
{ "set": { "if": "ctx.rule.severity == 2", "field": "event.severity", "value": 2, "override": true } },
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{ "gsub": { "field": "message2.columns.data", "pattern": "\\\\xC2\\\\xAE", "replacement": "", "ignore_missing": true } },
|
||||
{ "rename": { "if": "ctx.message2.columns?.eventid != null", "field": "message2.columns", "target_field": "winlog", "ignore_missing": true } },
|
||||
{ "json": { "field": "winlog.data", "target_field": "temp", "ignore_failure": true } },
|
||||
{ "rename": { "field": "temp.Data", "target_field": "winlog.event_data", "ignore_missing": true } },
|
||||
{ "rename": { "field": "temp.EventData", "target_field": "winlog.event_data", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.source", "target_field": "winlog.channel", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.eventid", "target_field": "winlog.event_id", "ignore_missing": true } },
|
||||
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
|
||||
@@ -22,4 +22,4 @@
|
||||
{ "set": { "field": "event.dataset", "value": "{{osquery.result.name}}", "override": false} },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,7 +63,7 @@
|
||||
{ "rename": { "field": "fields.module", "target_field": "event.module", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "pipeline": { "if": "ctx.winlog?.channel == 'Microsoft-Windows-Sysmon/Operational'", "name": "sysmon" } },
|
||||
{ "pipeline": { "if": "ctx.winlog?.channel != 'Microsoft-Windows-Sysmon/Operational'", "name":"win.eventlogs" } },
|
||||
{ "set": { "if": "ctx.containsKey('rule') && ctx.rule != null", "field": "event.dataset", "value": "alert", "override": true } },
|
||||
{ "set": { "if": "ctx.rule != null && ctx.rule.name != null", "field": "event.dataset", "value": "alert", "override": true } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -6,15 +6,27 @@
|
||||
{ "rename": { "field": "message2.scan", "target_field": "scan", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.request", "target_field": "request", "ignore_missing": true } },
|
||||
{ "rename": { "field": "scan.hash", "target_field": "hash", "ignore_missing": true } },
|
||||
|
||||
{ "rename": { "field": "scan.exiftool", "target_field": "exiftool", "ignore_missing": true } },
|
||||
{ "grok": { "if": "ctx.request?.attributes?.filename != null", "field": "request.attributes.filename", "patterns": ["-%{WORD:log.id.fuid}-"], "ignore_failure": true } },
|
||||
{ "foreach":
|
||||
{
|
||||
"if": "ctx.scan?.exiftool?.keys !=null",
|
||||
"field": "scan.exiftool.keys",
|
||||
"processor":{
|
||||
"if": "ctx.exiftool?.keys !=null",
|
||||
"field": "exiftool.keys",
|
||||
"processor": {
|
||||
"append": {
|
||||
"field": "scan.exiftool",
|
||||
"value": "{{_ingest._value.key}}={{_ingest._value.value}}"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
{ "foreach":
|
||||
{
|
||||
"if": "ctx.exiftool?.keys !=null",
|
||||
"field": "exiftool.keys",
|
||||
"processor": {
|
||||
"set": {
|
||||
"field": "scan.exiftool.{{_ingest._value.key}}",
|
||||
"field": "exiftool.{{_ingest._value.key}}",
|
||||
"value": "{{_ingest._value.value}}"
|
||||
}
|
||||
}
|
||||
@@ -32,6 +44,14 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
{ "set": { "if": "ctx.exiftool?.SourceFile != null", "field": "file.source", "value": "{{exiftool.SourceFile}}", "ignore_failure": true }},
|
||||
{ "set": { "if": "ctx.exiftool?.FilePermissions != null", "field": "file.permissions", "value": "{{exiftool.FilePermissions}}", "ignore_failure": true }},
|
||||
{ "set": { "if": "ctx.exiftool?.FileName != null", "field": "file.name", "value": "{{exiftool.FileName}}", "ignore_failure": true }},
|
||||
{ "set": { "if": "ctx.exiftool?.FileModifyDate != null", "field": "file.mtime", "value": "{{exiftool.FileModifyDate}}", "ignore_failure": true }},
|
||||
{ "set": { "if": "ctx.exiftool?.FileAccessDate != null", "field": "file.accessed", "value": "{{exiftool.FileAccessDate}}", "ignore_failure": true }},
|
||||
{ "set": { "if": "ctx.exiftool?.FileInodeChangeDate != null", "field": "file.ctime", "value": "{{exiftool.FileInodeChangeDate}}", "ignore_failure": true }},
|
||||
{ "set": { "if": "ctx.exiftool?.FileDirectory != null", "field": "file.directory", "value": "{{exiftool.FileDirectory}}", "ignore_failure": true }},
|
||||
{ "set": { "if": "ctx.exiftool?.Subsystem != null", "field": "host.subsystem", "value": "{{exiftool.Subsystem}}", "ignore_failure": true }},
|
||||
{ "set": { "if": "ctx.scan?.yara?.matches != null", "field": "rule.name", "value": "{{scan.yara.matches.0}}" }},
|
||||
{ "set": { "if": "ctx.scan?.yara?.matches != null", "field": "dataset", "value": "alert", "override": true }},
|
||||
{ "rename": { "field": "file.flavors.mime", "target_field": "file.mime_type", "ignore_missing": true }},
|
||||
@@ -42,7 +62,8 @@
|
||||
{ "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 70 && ctx.rule?.score <=89", "field": "event.severity", "value": 3, "override": true } },
|
||||
{ "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 90", "field": "event.severity", "value": 4, "override": true } },
|
||||
{ "set": { "field": "observer.name", "value": "{{agent.name}}" }},
|
||||
{ "remove": { "field": ["host", "path", "message", "scan.exiftool.keys", "scan.yara.meta"], "ignore_missing": true } },
|
||||
{ "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }},
|
||||
{ "remove": { "field": ["host", "path", "message", "exiftool", "scan.yara.meta"], "ignore_missing": true } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
|
||||
salt/elasticsearch/files/ingest/suricata.ftp_data (new file, 10 lines)
@@ -0,0 +1,10 @@
|
||||
{
|
||||
"description" : "suricata.ftp_data",
|
||||
"processors" : [
|
||||
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ftp_data.command", "target_field": "ftp.command", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.ftp_data.filename","target_field": "ftp.argument", "ignore_missing": true } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
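New ingest pipelines such as suricata.ftp_data above are registered by PUTting the JSON body to the _ingest/pipeline endpoint, which is what the so-elasticsearch-pipelines loop later in this diff does for every file in the directory. A one-off sketch against a hypothetical local Elasticsearch (host and port are assumptions; the deployed script takes them from pillar values):

# Sketch only: load this one pipeline by hand.
curl -XPUT 'http://localhost:9200/_ingest/pipeline/suricata.ftp_data' \
    -H 'Content-Type: application/json' \
    -d @suricata.ftp_data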
|
||||
@@ -12,9 +12,25 @@
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{ "grok": { "field": "message", "patterns": ["<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}: %{GREEDYDATA:real_message}"], "ignore_failure": false } },
|
||||
{ "set": { "if": "ctx.source.application == 'filterlog'", "field": "dataset", "value": "firewall" } },
|
||||
{ "pipeline": { "if": "ctx.dataset == 'firewall'", "name": "filterlog" } },
|
||||
{
|
||||
"grok":
|
||||
{
|
||||
"field": "message",
|
||||
"patterns": [
|
||||
"^<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}: %{GREEDYDATA:real_message}$",
|
||||
"^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$"
|
||||
],
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{ "set": { "if": "ctx.source?.application == 'filterlog'", "field": "dataset", "value": "firewall", "ignore_failure": true } },
|
||||
{ "set": { "if": "ctx.vendor != null", "field": "module", "value": "{{ vendor }}", "ignore_failure": true } },
|
||||
{ "set": { "if": "ctx.product != null", "field": "dataset", "value": "{{ product }}", "ignore_failure": true } },
|
||||
{ "set": { "field": "ingest.timestamp", "value": "{{ @timestamp }}" } },
|
||||
{ "date": { "if": "ctx.syslog?.timestamp != null", "field": "syslog.timestamp", "target_field": "@timestamp", "formats": ["MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601", "UNIX"], "ignore_failure": true } },
|
||||
{ "remove": { "field": ["pid", "program"], "ignore_missing": true, "ignore_failure": true } },
|
||||
{ "pipeline": { "if": "ctx.vendor != null && ctx.product != null", "name": "{{ vendor }}.{{ product }}", "ignore_failure": true } },
|
||||
{ "pipeline": { "if": "ctx.dataset == 'firewall'", "name": "filterlog", "ignore_failure": true } },
|
||||
{ "pipeline": { "name": "common" } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -30,40 +30,40 @@
|
||||
{ "rename": { "field": "winlog.event_data.DestinationHostname", "target_field": "destination.hostname", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.DestinationIp", "target_field": "destination.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.DestinationPort", "target_field": "destination.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.image", "target_field": "process.executable", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.image", "target_field": "process.executable", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.Image", "target_field": "process.executable", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.processID", "target_field": "process.pid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.ProcessID", "target_field": "process.pid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.processGuid", "target_field": "process.entity_id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.processID", "target_field": "process.pid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.ProcessId", "target_field": "process.pid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.processGuid", "target_field": "process.entity_id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.ProcessGuid", "target_field": "process.entity_id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.commandLine", "target_field": "process.command_line", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.commandLine", "target_field": "process.command_line", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.CommandLine", "target_field": "process.command_line", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.currentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.currentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.CurrentDirectory", "target_field": "process.working_directory", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.description", "target_field": "process.pe.description", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.description", "target_field": "process.pe.description", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.Description", "target_field": "process.pe.description", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.product", "target_field": "process.pe.product", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.product", "target_field": "process.pe.product", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.Product", "target_field": "process.pe.product", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.company", "target_field": "process.pe.company", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.company", "target_field": "process.pe.company", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.Company", "target_field": "process.pe.company", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.originalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.originalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.OriginalFileName", "target_field": "process.pe.original_file_name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.fileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.fileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.FileVersion", "target_field": "process.pe.file_version", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.parentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.parentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.ParentCommandLine", "target_field": "process.parent.command_line", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.parentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.parentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.ParentImage", "target_field": "process.parent.executable", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.parentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.parentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.ParentProcessGuid", "target_field": "process.parent.entity_id", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.parentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.parentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.ParentProcessId", "target_field": "process.ppid", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.Protocol", "target_field": "network.transport", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.SourceHostname", "target_field": "source.hostname", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.SourceIp", "target_field": "source.ip", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.SourcePort", "target_field": "source.port", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.targetFilename", "target_field": "file.target", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.TargetFilename", "target_field": "file.target", "ignore_missing": true } }
|
||||
{ "rename": { "field": "winlog.event_data.TargetFilename", "target_field": "file.target", "ignore_missing": true } }
|
||||
]
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{ "set": { "if": "ctx.winlog?.computer_name != null", "field": "observer.name", "value": "{{winlog.computer_name}}", "override": true } },
|
||||
{ "set": { "field": "event.code", "value": "{{winlog.event_id}}", "override": true } },
|
||||
{ "set": { "field": "event.category", "value": "host", "override": true } },
|
||||
{ "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.SubjectUserName", "target_field": "user.name", "ignore_failure": true, "ignore_missing": true } },
|
||||
{ "rename": { "field": "winlog.event_data.User", "target_field": "user.name", "ignore_missing": true } }
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,10 @@
|
||||
{%- set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{%- set MANAGER = salt['grains.get']('master') %}
|
||||
. /usr/sbin/so-common
|
||||
|
||||
# Exit on errors, since all lines must succeed
|
||||
set -e
|
||||
|
||||
# Check to see if we have extracted the ca cert.
|
||||
if [ ! -f /opt/so/saltstack/local/salt/common/cacerts ]; then
|
||||
docker run -v /etc/pki/ca.crt:/etc/pki/ca.crt --name so-elasticsearchca --user root --entrypoint jdk/bin/keytool {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-elasticsearch:{{ VERSION }} -keystore /etc/pki/ca-trust/extracted/java/cacerts -alias SOSCA -import -file /etc/pki/ca.crt -storepass changeit -noprompt
|
||||
|
||||
@@ -28,9 +28,9 @@ COUNT=0
|
||||
ELASTICSEARCH_CONNECTED="no"
|
||||
while [[ "$COUNT" -le 240 ]]; do
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
curl ${ELASTICSEARCH_AUTH} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
{% else %}
|
||||
curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
curl ${ELASTICSEARCH_AUTH} --output /dev/null --silent --head --fail -L http://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
|
||||
{% endif %}
|
||||
if [ $? -eq 0 ]; then
|
||||
ELASTICSEARCH_CONNECTED="yes"
|
||||
@@ -52,9 +52,9 @@ cd ${ELASTICSEARCH_INGEST_PIPELINES}
|
||||
|
||||
echo "Loading pipelines..."
|
||||
{% if grains['role'] in ['so-node','so-heavynode'] %}
|
||||
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
|
||||
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
|
||||
{% else %}
|
||||
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
|
||||
for i in *; do echo $i; RESPONSE=$(curl ${ELASTICSEARCH_AUTH} -XPUT -L http://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
|
||||
{% endif %}
|
||||
echo
|
||||
|
||||
|
||||
@@ -21,23 +21,26 @@
|
||||
{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %}
|
||||
{% set MANAGER = salt['grains.get']('master') %}
|
||||
{% set FEATURES = salt['pillar.get']('elastic:features', False) %}
|
||||
{%- set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
{% set NODEIP = salt['pillar.get']('elasticsearch:mainip', '') -%}
|
||||
{% set TRUECLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}
|
||||
{% set MANAGERIP = salt['pillar.get']('global:managerip') %}
|
||||
|
||||
|
||||
{%- if FEATURES is sameas true %}
|
||||
{% if FEATURES is sameas true %}
|
||||
{% set FEATUREZ = "-features" %}
|
||||
{% else %}
|
||||
{% set FEATUREZ = '' %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains['role'] in ['so-eval','so-managersearch', 'so-manager', 'so-standalone', 'so-import'] %}
|
||||
{% set esclustername = salt['pillar.get']('manager:esclustername', '') %}
|
||||
{% set esheap = salt['pillar.get']('manager:esheap', '') %}
|
||||
{% set esclustername = salt['pillar.get']('manager:esclustername') %}
|
||||
{% set esheap = salt['pillar.get']('manager:esheap') %}
|
||||
{% set ismanager = True %}
|
||||
{% elif grains['role'] in ['so-node','so-heavynode'] %}
|
||||
{% set esclustername = salt['pillar.get']('elasticsearch:esclustername', '') %}
|
||||
{% set esheap = salt['pillar.get']('elasticsearch:esheap', '') %}
|
||||
{% set esclustername = salt['pillar.get']('elasticsearch:esclustername') %}
|
||||
{% set esheap = salt['pillar.get']('elasticsearch:esheap') %}
|
||||
{% set ismanager = False %}
|
||||
{% elif grains['role'] == 'so-helix' %}
|
||||
{% set ismanager = True %} {# Solely for the sake of running so-catrust #}
|
||||
{% endif %}
|
||||
|
||||
{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
|
||||
@@ -86,6 +89,8 @@ capemz:
|
||||
- user: 939
|
||||
- group: 939
|
||||
|
||||
{% if grains['role'] != 'so-helix' %}
|
||||
|
||||
# Add ES Group
|
||||
elasticsearchgroup:
|
||||
group.present:
|
||||
@@ -188,16 +193,21 @@ so-elasticsearch:
|
||||
- name: so-elasticsearch
|
||||
- user: elasticsearch
|
||||
- extra_hosts:
|
||||
{% if ismanager %}
|
||||
- {{ grains.host }}:{{ NODEIP }}
|
||||
{%- if ismanager %}
|
||||
{%- if salt['pillar.get']('nodestab', {}) %}
|
||||
{%- for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
|
||||
{% if salt['pillar.get']('nodestab', {}) %}
|
||||
{% for SN, SNDATA in salt['pillar.get']('nodestab', {}).items() %}
|
||||
- {{ SN.split('_')|first }}:{{ SNDATA.ip }}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
{%- endif %}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% else %}
|
||||
- {{ grains.host }}:{{ NODEIP }}
|
||||
- {{ MANAGER }}:{{ MANAGERIP }}
|
||||
{% endif %}
|
||||
- environment:
|
||||
{% if TRUECLUSTER is sameas false or (TRUECLUSTER is sameas true and not salt['pillar.get']('nodestab', {})) %}
|
||||
- discovery.type=single-node
|
||||
{% endif %}
|
||||
- ES_JAVA_OPTS=-Xms{{ esheap }} -Xmx{{ esheap }}
|
||||
ulimits:
|
||||
- memlock=-1:-1
|
||||
@@ -215,13 +225,17 @@ so-elasticsearch:
|
||||
- /etc/pki/ca.crt:/usr/share/elasticsearch/config/ca.crt:ro
|
||||
- /etc/pki/elasticsearch.p12:/usr/share/elasticsearch/config/elasticsearch.p12:ro
|
||||
- /opt/so/conf/elasticsearch/sotls.yml:/usr/share/elasticsearch/config/sotls.yml:ro
|
||||
|
||||
- watch:
|
||||
- file: cacertz
|
||||
- file: esyml
|
||||
- file: esingestconf
|
||||
- file: so-elasticsearch-pipelines-file
|
||||
|
||||
append_so-elasticsearch_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-elasticsearch
|
||||
|
||||
so-elasticsearch-pipelines-file:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/elasticsearch/so-elasticsearch-pipelines
|
||||
@@ -247,10 +261,12 @@ so-elasticsearch-templates:
|
||||
- template: jinja
|
||||
{% endif %}
|
||||
|
||||
{% endif %} {# if grains['role'] != 'so-helix' #}
|
||||
|
||||
{% else %}
|
||||
|
||||
elasticsearch_state_not_allowed:
|
||||
test.fail_without_changes:
|
||||
- name: elasticsearch_state_not_allowed
|
||||
|
||||
{% endif %}
|
||||
{% endif %} {# if 'elasticsearch' in top_states #}
|
||||
|
||||
@@ -379,9 +379,14 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"scan":{
|
||||
"scan":{
|
||||
"type":"object",
|
||||
"dynamic": true
|
||||
"dynamic": true,
|
||||
"properties":{
|
||||
"exiftool":{
|
||||
"type":"text"
|
||||
}
|
||||
}
|
||||
},
|
||||
"server":{
|
||||
"type":"object",
|
||||
|
||||
@@ -74,7 +74,6 @@ filebeat.modules:
|
||||
# List of prospectors to fetch data.
|
||||
filebeat.inputs:
|
||||
#------------------------------ Log prospector --------------------------------
|
||||
{%- if grains['role'] in ['so-sensor', "so-eval", "so-helix", "so-heavynode", "so-standalone", "so-import"] %}
|
||||
- type: udp
|
||||
enabled: true
|
||||
host: "0.0.0.0:514"
|
||||
@@ -100,6 +99,8 @@ filebeat.inputs:
|
||||
- drop_fields:
|
||||
fields: ["source", "prospector", "input", "offset", "beat"]
|
||||
fields_under_root: true
|
||||
|
||||
{%- if grains['role'] in ['so-eval', 'so-standalone', 'so-sensor', 'so-helix', 'so-heavynode', 'so-import'] %}
|
||||
{%- if ZEEKVER != 'SURICATA' %}
|
||||
{%- for LOGNAME in salt['pillar.get']('zeeklogs:enabled', '') %}
|
||||
- type: log
|
||||
@@ -114,7 +115,7 @@ filebeat.inputs:
|
||||
fields: ["source", "prospector", "input", "offset", "beat"]
|
||||
|
||||
fields_under_root: true
|
||||
clean_removed: false
|
||||
clean_removed: true
|
||||
close_removed: false
|
||||
|
||||
- type: log
|
||||
|
||||
@@ -58,8 +58,8 @@ filebeatconfsync:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/filebeat/etc/filebeat.yml
|
||||
- source: salt://filebeat/etc/filebeat.yml
|
||||
- user: 0
|
||||
- group: 0
|
||||
- user: root
|
||||
- group: root
|
||||
- template: jinja
|
||||
- defaults:
|
||||
INPUTS: {{ salt['pillar.get']('filebeat:config:inputs', {}) }}
|
||||
@@ -82,9 +82,15 @@ so-filebeat:
|
||||
- /etc/ssl/certs/intca.crt:/usr/share/filebeat/intraca.crt:ro
|
||||
- port_bindings:
|
||||
- 0.0.0.0:514:514/udp
|
||||
- 0.0.0.0:514:514/tcp
|
||||
- watch:
|
||||
- file: /opt/so/conf/filebeat/etc/filebeat.yml
|
||||
|
||||
append_so-filebeat_so-status.conf:
|
||||
file.append:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- text: so-filebeat
|
||||
|
{% else %}

filebeat_state_not_allowed:

@@ -1,6 +1,7 @@
{% set ISAIRGAP = salt['pillar.get']('global:airgap', 'False') %}
{% import_yaml 'firewall/portgroups.yaml' as portgroups %}
{% set portgroups = portgroups.firewall.aliases.ports %}
{% set TRUE_CLUSTER = salt['pillar.get']('elasticsearch:true_cluster', False) %}

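The import_yaml line pulls named port aliases out of firewall/portgroups.yaml, and the following set flattens them to portgroups for the role definitions below. That file is not part of this diff; the excerpt below is only a hedged sketch of the shape implied by the portgroups.firewall.aliases.ports lookup, with illustrative port numbers and protocols:

# Hypothetical excerpt of firewall/portgroups.yaml; alias names mirror the
# references below, but the structure and port numbers are illustrative guesses.
firewall:
  aliases:
    ports:
      beats_5044:
        tcp:
          - 5044
      syslog:
        udp:
          - 514
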
role:
eval:
@@ -32,9 +33,9 @@ role:
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }}
- {{ portgroups.sensoroni }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
@@ -42,6 +43,11 @@ role:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
heavy_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog}}
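
Each {{ portgroups.<name> }} reference is replaced at render time by the corresponding alias imported above. Purely as an illustration, and assuming each alias renders to a small protocol-to-ports mapping (the port numbers are conventional Beats ports, not values confirmed by this diff, and the sensoroni entry is omitted because its port is not visible here), the sensor entry might render to something like:

# Illustrative rendering only; the real alias structure and ports come from
# firewall/portgroups.yaml, which is not shown in this diff.
sensor:
  portgroups:
    - tcp:
        - 5044   # beats_5044
    - tcp:
        - 5644   # beats_5644
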
@@ -121,12 +127,12 @@ role:
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }}
- {{ portgroups.sensoroni }}
{% if ISAIRGAP is sameas true %}
- {{ portgroups.yum }}
{% endif %}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
@@ -134,6 +140,13 @@ role:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.beats_5644 }}
heavy_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
- {{ portgroups.beats_5644 }}
self:
portgroups:
- {{ portgroups.syslog}}
@@ -207,10 +220,10 @@ role:
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }}
- {{ portgroups.sensoroni }}
- {{ portgroups.yum }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
@@ -218,6 +231,11 @@ role:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
heavy_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog}}
@@ -291,10 +309,10 @@ role:
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }}
- {{ portgroups.fleet_api }}
- {{ portgroups.sensoroni }}
- {{ portgroups.yum }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
@@ -302,6 +320,11 @@ role:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
heavy_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.minio }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog}}
@@ -371,9 +394,9 @@ role:
- {{ portgroups.osquery_8080 }}
- {{ portgroups.influxdb }}
- {{ portgroups.wazuh_api }}
- {{ portgroups.sensoroni }}
sensor:
portgroups:
- {{ portgroups.sensoroni }}
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
search_node:
@@ -424,6 +447,14 @@ role:
elasticsearch_rest:
portgroups:
- {{ portgroups.elasticsearch_rest }}
{% if TRUE_CLUSTER %}
search_node:
portgroups:
- {{ portgroups.elasticsearch_node }}
{% endif %}
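
The TRUE_CLUSTER flag read at the top of the file gates this extra search_node port group, so cross-node Elasticsearch transport traffic is only opened when true-cluster mode is enabled. The pillar key comes straight from the salt['pillar.get'] call above; the value shown is just an example:

# Pillar flag gating the block above (key from the Jinja lookup; value illustrative).
elasticsearch:
  true_cluster: True
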
self:
portgroups:
- {{ portgroups.syslog}}
INPUT:
hostgroups:
anywhere:
@@ -437,6 +468,11 @@ role:
- {{ portgroups.all }}
sensor:
chain:
DOCKER-USER:
hostgroups:
self:
portgroups:
- {{ portgroups.syslog}}
INPUT:
hostgroups:
anywhere:
@@ -463,6 +499,9 @@ role:
elasticsearch_rest:
portgroups:
- {{ portgroups.elasticsearch_rest }}
self:
portgroups:
- {{ portgroups.syslog}}
INPUT:
hostgroups:
anywhere:
@@ -521,18 +560,15 @@ role:
minion:
portgroups:
- {{ portgroups.docker_registry }}
- {{ portgroups.sensoroni }}
sensor:
portgroups:
- {{ portgroups.beats_5044 }}
- {{ portgroups.beats_5644 }}
- {{ portgroups.sensoroni }}
search_node:
portgroups:
- {{ portgroups.redis }}
- {{ portgroups.elasticsearch_node }}
self:
portgroups:
- {{ portgroups.syslog}}
beats_endpoint:
portgroups:
- {{ portgroups.beats_5044 }}
Some files were not shown because too many files have changed in this diff.