Compare commits

...

363 Commits

Author SHA1 Message Date
Mike Reeves
d3802c1668 Merge pull request #11854 from Security-Onion-Solutions/hotfix/2.4.30
Hotfix/2.4.30
2023-11-21 16:39:40 -05:00
Mike Reeves
874618d512 Merge pull request #11853 from Security-Onion-Solutions/2.4.30hf2
2.4.30 hotfix
2023-11-21 14:32:53 -05:00
Mike Reeves
fa9032b323 2.4.30 hotfix 2023-11-21 14:28:23 -05:00
Mike Reeves
17942676c6 Merge pull request #11844 from Security-Onion-Solutions/TOoSmOotH-patch-5
Update soup
2023-11-21 10:32:24 -05:00
Mike Reeves
458c6de39d Update soup 2023-11-21 10:30:21 -05:00
Mike Reeves
a39f696a34 Merge pull request #11843 from Security-Onion-Solutions/TOoSmOotH-patch-4
Update soup
2023-11-21 10:19:21 -05:00
Mike Reeves
9aa193af3b Update soup 2023-11-21 10:18:02 -05:00
Mike Reeves
3f1f256748 Merge pull request #11842 from Security-Onion-Solutions/TOoSmOotH-patch-3
Update HOTFIX
2023-11-21 10:01:13 -05:00
Mike Reeves
c78ea0183f Update HOTFIX 2023-11-21 09:59:51 -05:00
Mike Reeves
e9417dd437 Merge pull request #11841 from Security-Onion-Solutions/TOoSmOotH-patch-2
Update soup
2023-11-21 09:56:45 -05:00
Mike Reeves
14b5aa476e Update soup 2023-11-21 09:55:44 -05:00
Mike Reeves
4b0033c60a Merge pull request #11827 from Security-Onion-Solutions/hotfix/2.4.30
Hotfix 2.4.30
2023-11-20 15:26:16 -05:00
Mike Reeves
c20004c210 Merge pull request #11826 from Security-Onion-Solutions/2.4.30hf
2.4.30 hotfix
2023-11-20 11:35:11 -05:00
Mike Reeves
45dc1ce036 2.4.30 hotfix 2023-11-20 11:32:21 -05:00
Jason Ertel
0cc10fbf80 Merge pull request #11823 from Security-Onion-Solutions/jertel/igwarn
ignore libwbclient upgrade warning
2023-11-19 19:46:19 -05:00
Jason Ertel
e71ee97717 ignore libwbclient upgrade warning 2023-11-19 19:03:23 -05:00
Mike Reeves
77d0a7277a Merge pull request #11818 from Security-Onion-Solutions/TOoSmOotH-patch-2
Update soup
2023-11-17 17:07:54 -05:00
Mike Reeves
2ae87de409 Merge branch 'hotfix/2.4.30' into TOoSmOotH-patch-2 2023-11-17 17:05:11 -05:00
Josh Brower
a69a65c44f Merge pull request #11819 from Security-Onion-Solutions/hftesting
Remove state file
2023-11-17 16:54:08 -05:00
Mike Reeves
d89beefc8c Update soup 2023-11-17 16:53:11 -05:00
Josh Brower
9c371fc374 Remove state file 2023-11-17 16:52:34 -05:00
Mike Reeves
4fb9cce41c Update signing_policies.conf 2023-11-17 16:38:50 -05:00
Mike Reeves
e226efa799 Update soup 2023-11-17 16:35:12 -05:00
Josh Brower
82a41894f3 Merge pull request #11817 from Security-Onion-Solutions/hftesting
Hftesting
2023-11-17 13:12:06 -05:00
Josh Brower
7aadc3851f Remove state file 2023-11-17 13:08:15 -05:00
Josh Brower
ca1498fca1 Dont update Defend Integration 2023-11-17 12:19:22 -05:00
Josh Brower
15fc4f2655 Merge pull request #11815 from Security-Onion-Solutions/hftesting
use updated code
2023-11-17 11:23:45 -05:00
Josh Brower
089a111ae8 use updated code 2023-11-17 11:20:13 -05:00
Josh Brower
33bd04b797 Merge pull request #11811 from Security-Onion-Solutions/hftesting
Move API check logic
2023-11-17 06:02:26 -05:00
Josh Brower
5920a14478 Move API check logic 2023-11-16 20:34:01 -05:00
Josh Brower
3ede19a106 Merge pull request #11808 from Security-Onion-Solutions/2.4/defendhotfix2
Update HOTFIX
2023-11-16 15:25:24 -05:00
weslambert
b6e2df45c7 Update HOTFIX 2023-11-16 14:48:00 -05:00
Josh Brower
af98c8e2da Merge pull request #11805 from Security-Onion-Solutions/2.4/defendhotfix2
.30 hotfix
2023-11-16 11:42:49 -05:00
Josh Brower
6b8e48c973 Remove highstate 2023-11-16 11:41:20 -05:00
Josh Brower
109ee55d8c Add to pre for .30 soup 2023-11-16 11:37:38 -05:00
Josh Brower
ff8cd194f1 Make sure kibana API is up 2023-11-16 11:21:34 -05:00
Josh Brower
d5dd0d88ed .30 hotfix 2023-11-16 10:58:23 -05:00
weslambert
46c5bf40e0 Merge pull request #11804 from Security-Onion-Solutions/fix/kibana_corrupt_integration
Discard corrupt integration
2023-11-16 10:49:39 -05:00
Wes
3ed7b36865 Discard corrupt integration 2023-11-16 15:45:38 +00:00
Mike Reeves
f036623d55 Merge pull request #11777 from Security-Onion-Solutions/2.4/dev
2.4.30
2023-11-13 15:27:24 -05:00
Mike Reeves
1204ce96f3 Merge pull request #11776 from Security-Onion-Solutions/2.4.30
2.4.30
2023-11-13 13:13:29 -05:00
Mike Reeves
bc178a9784 2.4.30 2023-11-13 13:11:49 -05:00
Mike Reeves
c338daabce Merge pull request #11769 from Security-Onion-Solutions/TOoSmOotH-patch-7
Update soup
2023-11-13 08:51:40 -05:00
Mike Reeves
fe7af49a82 Update soup 2023-11-13 08:37:46 -05:00
weslambert
aeb09b16db Merge pull request #11760 from Security-Onion-Solutions/fix/elastic_packages
Add Elastic Agent package and upgrade packages when elasticfleet.packages list changes
2023-11-10 10:20:17 -05:00
weslambert
583ec5176e Add package check 2023-11-10 10:15:52 -05:00
weslambert
4bb1dabb89 Add elastic_agent 2023-11-10 10:14:59 -05:00
Josh Brower
89c3d45abe Merge pull request #11751 from Security-Onion-Solutions/2.4/fleetresetfix2
Remove unneeded datastreams
2023-11-09 15:04:02 -05:00
Josh Brower
551f7831de Add more clarity to message 2023-11-09 15:01:56 -05:00
Josh Brower
193c9d202e Remove unneeded datastreams 2023-11-09 14:30:00 -05:00
Josh Brower
b5912fc1e4 Merge pull request #11750 from Security-Onion-Solutions/2.4/defendpolicy
Upgrade Defend Integration policy
2023-11-09 12:48:57 -05:00
Josh Brower
33f538b73e Upgrade Defend Integration policy 2023-11-09 11:52:06 -05:00
Josh Brower
d3ea5def69 Merge pull request #11747 from Security-Onion-Solutions/2.4/resetscriptfix
remove state file
2023-11-09 09:12:52 -05:00
Josh Brower
d1b6ef411b remove state file 2023-11-09 09:01:57 -05:00
Jason Ertel
8ca825b9a1 Merge pull request #11745 from Security-Onion-Solutions/jertel/yaml
re-add source pkgs from accidental commit
2023-11-09 07:19:22 -05:00
Jason Ertel
209e237d0d re-add source pkgs from accidental commit 2023-11-09 00:34:52 -05:00
Jason Ertel
325dceb01b Merge pull request #11743 from Security-Onion-Solutions/fix/elastic_template_check
Additional fixes for index template check
2023-11-09 00:15:14 -05:00
weslambert
02baa18502 Add metrics 2023-11-08 22:41:24 -05:00
Jason Ertel
268dc03131 Merge pull request #11742 from Security-Onion-Solutions/jertel/yaml
add yaml helper script; refactor python testing
2023-11-08 21:06:04 -05:00
weslambert
e39edab00d Exclude osquery and display failed name 2023-11-08 20:55:08 -05:00
weslambert
acb6e84248 Don't load index template if component template doesn't exist 2023-11-08 20:34:08 -05:00
Jason Ertel
9231c8d2f2 replace reset sed with new script 2023-11-08 19:17:32 -05:00
Jason Ertel
bc044fa2d5 more coverage 2023-11-08 18:42:06 -05:00
Jason Ertel
84b815c2ef add yaml helper script; refactor python testing 2023-11-08 18:30:05 -05:00
Jason Ertel
1ab44a40d3 add yaml helper script; refactor python testing 2023-11-08 18:29:06 -05:00
Jason Ertel
9317e51f20 add yaml helper script; refactor python testing 2023-11-08 18:26:37 -05:00
Jason Ertel
33a8ef1568 add yaml helper script; refactor python testing 2023-11-08 18:24:23 -05:00
Josh Patterson
01e846ba22 Merge pull request #11741 from Security-Onion-Solutions/issue/11738
remove comments from BPFs
2023-11-08 15:25:02 -05:00
weslambert
9df3a8fc18 Merge pull request #11740 from Security-Onion-Solutions/fix/elastic_templates
Remove template files
2023-11-08 15:20:01 -05:00
weslambert
36098e6314 Remove template files 2023-11-08 14:32:58 -05:00
Jason Ertel
32079a7bce Merge pull request #11734 from Security-Onion-Solutions/fix/elastic_scripts
Improve error handling and add retry logic
2023-11-08 12:19:00 -05:00
Jason Ertel
3701c1d847 ignore retry logging 2023-11-08 11:50:56 -05:00
m0duspwnens
f46aef1611 remove comments from BPFs 2023-11-08 11:23:19 -05:00
Jason Ertel
d256be3eb3 allow template loads to partially succeed only on the initial attempt 2023-11-08 10:32:11 -05:00
Wes
653fda124f Check expected with retry 2023-11-08 13:02:17 +00:00
Wes
b46e86c39b Extend index template loading to 60 attempts and a total of ~5 minutes 2023-11-08 02:29:09 +00:00
Wes
de9f9549af Extend template loading to 24 attempts and a total of ~2 minutes 2023-11-07 23:55:03 +00:00
weslambert
749e22e4b9 Fix if statement 2023-11-07 17:29:38 -05:00
weslambert
69ec1987af Fix if statement 2023-11-07 17:28:37 -05:00
Wes
570624da7e Remove RETURN_CODE 2023-11-07 21:09:29 +00:00
Wes
7772657b4b Remove RETURN_CODE 2023-11-07 21:06:35 +00:00
Wes
1676c84f9c Use the retry function so-elasticsearch-query 2023-11-07 19:56:50 +00:00
Jason Ertel
e665899e4d Merge pull request #11735 from Security-Onion-Solutions/fix/elastic_agent_template
Change pipeline to 1.13.1
2023-11-07 14:11:47 -05:00
weslambert
1dcca0bfd3 Change pipeline to 1.13.1 2023-11-07 12:17:51 -05:00
Wes
0b4a246ddb State file changes and retry logic 2023-11-07 16:44:42 +00:00
weslambert
f97dc70fcb Merge pull request #11732 from Security-Onion-Solutions/fix/elastic_agent_template
Change pipeline to 1.8.0
2023-11-07 09:08:25 -05:00
weslambert
cce80eb2fb Change pipeline to 1.8.0 2023-11-07 09:02:48 -05:00
Jason Ertel
b008661b6b Merge pull request #11726 from Security-Onion-Solutions/jertel/auto
improve verbosity of setup logs
2023-11-06 11:27:33 -05:00
Jason Ertel
b99c7ce76e improve verbosity of setup logs 2023-11-06 11:22:35 -05:00
Wes
c30a0d5b5b Better error handling and state file management 2023-11-06 14:29:01 +00:00
Wes
74eda68d84 Exit if unable to communicate with Elasticsearch 2023-11-06 13:16:35 +00:00
Josh Brower
ef1dfc3152 Merge pull request #11722 from Security-Onion-Solutions/2.4/packageupgrade
Set execute permissions
2023-11-06 08:06:13 -05:00
Josh Brower
f6cd35e143 Set execute permissions 2023-11-06 08:03:31 -05:00
Jason Ertel
d010af9a24 Merge pull request #11718 from Security-Onion-Solutions/jertel/auto
disregard false positives
2023-11-04 16:32:02 -04:00
Jason Ertel
7a0b21647f disregard false positives 2023-11-04 10:05:37 -04:00
Josh Patterson
610374816d Merge pull request #11714 from Security-Onion-Solutions/change/so-minion
apply es and soc states to manager if new search or hn are added
2023-11-03 16:43:16 -04:00
Josh Brower
3ff74948d8 Merge pull request #11713 from Security-Onion-Solutions/2.4/agentupdate
Upgrade Elastic Agent
2023-11-03 15:23:55 -04:00
Josh Brower
0086c24729 Upgrade Elastic Agent 2023-11-03 15:21:06 -04:00
m0duspwnens
9d2b84818f apply es and soc states to manager if new search or hn are added 2023-11-03 15:00:13 -04:00
Mike Reeves
b74aa32deb Merge pull request #11712 from Security-Onion-Solutions/TOoSmOotH-patch-5
Update soc_elasticsearch.yaml
2023-11-03 11:33:00 -04:00
Mike Reeves
3d8663db66 Update soc_elasticsearch.yaml 2023-11-03 11:29:45 -04:00
Josh Brower
65978a340f Merge pull request #11710 from Security-Onion-Solutions/2.4/navlayerfix
exit 0
2023-11-03 11:07:10 -04:00
Josh Brower
a8b0e41dbe exit 0 2023-11-03 11:04:52 -04:00
Jason Ertel
1bc4b44be7 Merge pull request #11709 from Security-Onion-Solutions/jertel/auto
ignore malformed open canary log lines
2023-11-03 09:17:23 -04:00
Jason Ertel
1a3d4a2051 ignore malformed open canary log lines 2023-11-03 09:14:26 -04:00
Josh Brower
9d639df882 Merge pull request #11708 from Security-Onion-Solutions/2.4/metadatafix2
Dont overwrite metadata
2023-11-03 08:47:48 -04:00
Josh Brower
8c7767b381 Dont overwrite metadata 2023-11-03 08:41:33 -04:00
weslambert
96582add5e Merge pull request #11704 from Security-Onion-Solutions/feature/integrations_checkpoint_vsphere
Checkpoint and VSphere Integrations
2023-11-02 17:17:03 -04:00
Wes
5bfef3f527 Add checkpoint and vsphere templates 2023-11-02 21:10:01 +00:00
Wes
3875970dc5 Add checkpoint and vsphere packages 2023-11-02 21:09:37 +00:00
Jason Ertel
7aa4f28524 Merge pull request #11702 from Security-Onion-Solutions/jertel/auto
ignore connectivity problems to docker containers during startup
2023-11-02 16:48:09 -04:00
Jason Ertel
96fdfb3829 ignore connectivity problems to docker containers during startup 2023-11-02 16:46:41 -04:00
weslambert
ac593e4632 Merge pull request #11701 from Security-Onion-Solutions/fix/elastic_templates_common
Don't source so-elastic-fleet-common if not there
2023-11-02 16:43:27 -04:00
weslambert
51e7861757 Don't source so-elastic-fleet-common if not there 2023-11-02 16:41:34 -04:00
Jason Ertel
6332df04d1 Merge pull request #11695 from Security-Onion-Solutions/jertel/auto
Jertel/auto
2023-11-02 13:07:09 -04:00
Jason Ertel
32701b5941 more log bypass 2023-11-02 12:50:12 -04:00
Josh Brower
0dec6693dc Merge pull request #11678 from Security-Onion-Solutions/2.4/fleetreset
Add Elastic Fleet reset script
2023-11-02 11:33:58 -04:00
Jason Ertel
41a6ab5b4f Merge pull request #11691 from Security-Onion-Solutions/jertel/auto
more log bypass
2023-11-02 10:41:17 -04:00
Jason Ertel
e18e0fd69a more log bypass 2023-11-02 10:39:14 -04:00
Josh Brower
2c0e287f8c Fix name 2023-11-02 10:34:24 -04:00
Josh Patterson
9a76cfe3d3 Merge pull request #11690 from Security-Onion-Solutions/upgrade/salt3006.3v2
fix UPGRADECOMMAND used for distrib salt upgrade. remove unneeded vars
2023-11-02 10:28:29 -04:00
m0duspwnens
6c4dc7cc09 fix UPGRADECOMMAND used for distrib salt upgrade. remove unneeded vars 2023-11-02 10:23:03 -04:00
Josh Brower
5388b92865 Refactor & cleanup 2023-11-02 10:20:32 -04:00
Jason Ertel
f932444101 Merge pull request #11689 from Security-Onion-Solutions/jertel/auto
more log bypass
2023-11-02 10:02:13 -04:00
Jason Ertel
1d2518310d more log bypass 2023-11-02 09:59:45 -04:00
weslambert
e10f043b1c Merge pull request #11688 from Security-Onion-Solutions/fix/integrations_roles
Add eval and import roles
2023-11-02 09:58:40 -04:00
weslambert
65735fc4d3 Add eval and import roles 2023-11-02 09:54:01 -04:00
Jason Ertel
b7f516fca4 Merge pull request #11687 from Security-Onion-Solutions/jertel/auto
adjust log filter to include all hosts
2023-11-02 09:24:08 -04:00
Jason Ertel
c8d8997119 adjust log filter to include all hosts 2023-11-02 09:21:57 -04:00
Josh Brower
c230cf4eb7 Formatting 2023-11-01 17:00:32 -04:00
Josh Brower
344dd7d61f Add Elastic Fleet reset script 2023-11-01 16:50:20 -04:00
Mike Reeves
cd8949d26b Merge pull request #11677 from Security-Onion-Solutions/lowram
Allow 16GB of memory
2023-11-01 16:38:40 -04:00
weslambert
f9e2940181 Merge pull request #11676 from Security-Onion-Solutions/feature/sublime_platform_integration
Sublime Platform Integration
2023-11-01 16:13:57 -04:00
Wes
f33079f1e3 Make settings global 2023-11-01 20:09:56 +00:00
Mike Reeves
e6a0838e4c Add memory restrictions 2023-11-01 15:26:24 -04:00
Mike Reeves
cc93976db9 Add memory restrictions 2023-11-01 15:17:23 -04:00
Mike Reeves
b3b67acf07 Add memory restrictions 2023-11-01 15:11:54 -04:00
Josh Patterson
64926941dc Merge pull request #11674 from Security-Onion-Solutions/foxtrot
Foxtrot
2023-11-01 15:03:30 -04:00
Wes
c32935e2e6 Remove optional integration from configuration if not enabled 2023-11-01 17:02:43 +00:00
Mike Reeves
4f98beaf9e Merge pull request #11671 from Security-Onion-Solutions/TOoSmOotH-patch-4
Remove legacy pillar info
2023-11-01 13:00:34 -04:00
Wes
655c88cd09 Make sure enabled_nodes is populated 2023-11-01 16:47:51 +00:00
Mike Reeves
f62e02a477 Delete pillar/thresholding/pillar.example 2023-11-01 10:42:29 -04:00
Mike Reeves
2b3e405b2d Delete pillar/thresholding/pillar.usage 2023-11-01 10:41:40 -04:00
Josh Patterson
59328d3909 Merge pull request #11670 from Security-Onion-Solutions/fix/soupagrepo
Fix/soupagrepo
2023-11-01 10:36:17 -04:00
m0duspwnens
4d7b1095b7 Merge remote-tracking branch 'origin/2.4/dev' into fix/soupagrepo 2023-11-01 10:31:59 -04:00
m0duspwnens
338146fedd fix repo update during soup for airgap 2023-11-01 10:19:56 -04:00
Wes
bca1194a46 Sublime SOC Action 2023-11-01 14:01:55 +00:00
Wes
a0926b7b87 Load optional integrations 2023-11-01 13:59:24 +00:00
Wes
44e45843bf Change optional integration Fleet configuration 2023-11-01 13:52:38 +00:00
Wes
9701d0ac20 Optional integration Fleet configuration 2023-11-01 13:47:20 +00:00
Wes
23ee9c2bb0 Sublime Platform integration 2023-11-01 13:41:40 +00:00
Wes
51247be6b9 Sublime Platform integration defaults 2023-11-01 13:37:52 +00:00
Wes
4dc64400c5 Support document_id 2023-11-01 13:36:32 +00:00
Wes
ae45d40eca Add Sublime Platform ingest pipeline 2023-11-01 13:34:30 +00:00
Mike Reeves
ebf982bf86 Merge pull request #11666 from Security-Onion-Solutions/TOoSmOotH-patch-3
Remove unused scripts and functions
2023-10-31 15:18:23 -04:00
Mike Reeves
d07cfdd3fe Update so-functions 2023-10-31 13:10:55 -04:00
Mike Reeves
497294c363 Delete salt/common/tools/sbin/so-zeek-logs 2023-10-31 12:57:10 -04:00
Mike Reeves
cc3a69683c Delete salt/manager/tools/sbin/so-allow-view 2023-10-31 12:55:47 -04:00
Mike Reeves
0c98bd96c7 Delete salt/idstools/tools/sbin/so-rule
UI does this now
2023-10-31 12:52:00 -04:00
Jason Ertel
a6d456e108 Merge pull request #11665 from Security-Onion-Solutions/jertel/auto
ignore specific Suricata errors
2023-10-31 11:20:28 -04:00
Jason Ertel
c420e198fb ignore specific Suricata errors 2023-10-31 11:18:39 -04:00
weslambert
5a85003952 Merge pull request #11664 from Security-Onion-Solutions/fix/elastic_import
Add import roles
2023-10-31 10:47:13 -04:00
weslambert
c354924b68 Add import roles 2023-10-31 10:05:29 -04:00
Jason Ertel
db0d687b87 Merge pull request #11661 from Security-Onion-Solutions/fix/elastic_eval_roles
Add roles for eval mode
2023-10-30 22:01:22 -04:00
weslambert
ed6473a34b Add roles for eval mode 2023-10-30 20:41:49 -04:00
Josh Patterson
1b99d5081a Merge pull request #11659 from Security-Onion-Solutions/issue/11457
ensure networkminer is latest version
2023-10-30 16:20:36 -04:00
m0duspwnens
07e51121ba ensure networkminer is latest version 2023-10-30 16:11:36 -04:00
weslambert
9a1e95cd09 Merge pull request #11648 from Security-Onion-Solutions/fix/ilm_remove_policy
Remove ILM policies for Cases and OSQuery manager indices
2023-10-27 17:28:59 -04:00
weslambert
76dd6f07ab Remove policy for OSQuery manager indices 2023-10-27 17:26:33 -04:00
weslambert
c955f9210a Remove policy for Cases indices 2023-10-27 17:24:27 -04:00
Josh Patterson
d35483aa02 Merge pull request #11647 from Security-Onion-Solutions/upgrade/salt3006.3v2
Upgrade/salt3006.3v2
2023-10-27 14:37:16 -04:00
Jorge Reyes
a9284b35a2 Merge pull request #11644 from Security-Onion-Solutions/bravo
UPGRADE: influxdb 2.7.1 & telegraf 1.28.2
2023-10-27 12:16:48 -04:00
Jason Ertel
58cab35a4c Merge pull request #11643 from Security-Onion-Solutions/kilo
oidc
2023-10-27 11:21:20 -04:00
Jason Ertel
3a83c52660 minor updates 2023-10-27 11:20:05 -04:00
Jason Ertel
d42b5ef901 remove unused url props to avoid kratos complaining about invalid urls when they're blank 2023-10-27 11:18:56 -04:00
m0duspwnens
2b511cef77 Merge branch 'upgrade/salt3006.3' into upgrade/salt3006.3v2 2023-10-27 10:58:09 -04:00
Josh Patterson
4bbcc5002a Revert "Revert "Upgrade/salt3006.3""
This reverts commit c41e19ad0b.
2023-10-27 10:56:45 -04:00
Mike Reeves
f1dbea6e2d Merge pull request #11623 from Security-Onion-Solutions/warmui
Warm Node UI Changes
2023-10-27 10:36:23 -04:00
Mike Reeves
25f1a0251f Annotation changes for warm node 2023-10-27 09:08:07 -04:00
Mike Reeves
87494f64c7 Annotation changes for warm node 2023-10-27 09:06:12 -04:00
Mike Reeves
ce1858fe05 Annotation changes for warm node 2023-10-27 09:02:39 -04:00
Mike Reeves
9fc3a73035 Annotation changes for warm node 2023-10-27 08:58:08 -04:00
Josh Brower
0d52efafa8 Merge pull request #11637 from Security-Onion-Solutions/2.4/kibanauser
2.4/kibanauser
2023-10-27 08:43:12 -04:00
defensivedepth
3b63ef149a Merge remote-tracking branch 'remotes/origin/2.4/dev' into 2.4/kibanauser 2023-10-27 07:50:58 -04:00
defensivedepth
cc3ee43192 Make dirs as needed 2023-10-27 07:49:34 -04:00
Mike Reeves
b37e38e3c3 Update defaults.yaml 2023-10-26 16:03:58 -04:00
Jorge Reyes
25982b79ab Merge pull request #11633 from Security-Onion-Solutions/reyesj2/influxdb_config
UPGRADE: Influxdb 2.7.1 & telegraf 1.28.2
2023-10-26 14:37:09 -04:00
Jason Ertel
cb9d72ebd7 switch back to kilo version 2023-10-26 14:19:59 -04:00
m0duspwnens
7e8f3b753f add minion name to log, update comment 2023-10-26 13:19:04 -04:00
reyesj2
47373adad2 Specify config.yaml in config_path. Otherwise when no influxd.bolt exists influxdb will fail to read the config file and won't create a new db.
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-10-26 13:15:40 -04:00
m0duspwnens
6891a95254 remove wait_for_salt_minion from so-functions 2023-10-26 13:02:39 -04:00
Mike Reeves
2e0100fd35 Update defaults.yaml 2023-10-26 12:37:55 -04:00
Jason Ertel
a969c319f5 Merge pull request #11631 from Security-Onion-Solutions/kilo
oidc
2023-10-26 12:30:06 -04:00
Jason Ertel
4942f83d4f adjust version to match target branch 2023-10-26 11:45:39 -04:00
Josh Brower
6f4566c23e Merge pull request #11609 from Security-Onion-Solutions/2.4/kibanauser
Add kibana curl config
2023-10-26 10:42:32 -04:00
Wes
891ea997e7 Add lifecycle policies and warm settings 2023-10-26 12:25:37 +00:00
Mike Reeves
01810a782c Annotation changes for warm node 2023-10-25 16:46:30 -04:00
Mike Reeves
6d6292714f Annotation changes for warm node 2023-10-25 16:21:47 -04:00
Mike Reeves
88fb7d06e6 Annotation changes for warm node 2023-10-25 16:20:28 -04:00
Josh Patterson
39abe19cfd Update config.map.jinja 2023-10-25 16:17:06 -04:00
Josh Patterson
807b40019f Update soc_elasticsearch.yaml 2023-10-25 16:16:48 -04:00
Josh Patterson
5f168a33ed Update defaults.yaml 2023-10-25 16:16:01 -04:00
Mike Reeves
d1170cb69f Update soc_elasticsearch.yaml 2023-10-25 16:05:20 -04:00
m0duspwnens
19fdc9319b fix role update 2023-10-25 15:58:26 -04:00
Mike Reeves
dc53b49f15 Update soup 2023-10-25 15:53:39 -04:00
Josh Patterson
af4b34801f Update defaults.yaml 2023-10-25 15:48:27 -04:00
Josh Patterson
1ae8896a05 Update config.map.jinja 2023-10-25 15:47:40 -04:00
Mike Reeves
6fb0c5dbfe Annotation changes for warm node 2023-10-25 15:37:36 -04:00
Mike Reeves
58bf6d3eff Merge branch '2.4/dev' of github.com:Security-Onion-Solutions/securityonion into warmui 2023-10-25 15:37:14 -04:00
Mike Reeves
a887551dad Annotation changes for warm node 2023-10-25 15:22:47 -04:00
Jason Ertel
b20177b0ef Merge branch '2.4/dev' into kilo 2023-10-25 15:19:57 -04:00
defensivedepth
1e710a22ce Merge remote-tracking branch 'remotes/origin/2.4/dev' into 2.4/kibanauser 2023-10-25 11:33:38 -04:00
Josh Patterson
d562445686 Merge pull request #11619 from Security-Onion-Solutions/revert-11612-upgrade/salt3006.3
Revert "Upgrade/salt3006.3"
2023-10-25 11:28:14 -04:00
Josh Patterson
c41e19ad0b Revert "Upgrade/salt3006.3" 2023-10-25 11:01:13 -04:00
m0duspwnens
a3e6b1ee1d change generate_ssl wait_for_salt_minion 2023-10-25 09:26:36 -04:00
Jason Ertel
a28cc274ba Merge branch '2.4/dev' into kilo 2023-10-25 09:04:36 -04:00
Jason Ertel
a66006c8a6 minor updates 2023-10-25 09:04:23 -04:00
defensivedepth
3ad480453a Rename to remove dupe 2023-10-25 07:20:07 -04:00
Josh Patterson
205748e992 Merge pull request #11613 from Security-Onion-Solutions/issue/11610
fix issue/11610
2023-10-24 18:16:44 -04:00
m0duspwnens
dfe707ab64 fix issue/11610 2023-10-24 17:26:39 -04:00
Josh Patterson
308e5ea505 Merge pull request #11612 from Security-Onion-Solutions/upgrade/salt3006.3
Upgrade/salt3006.3
2023-10-24 16:45:12 -04:00
m0duspwnens
3e343bff84 fix line to log properly 2023-10-24 16:40:51 -04:00
m0duspwnens
1d6e32fbab dont exit if salt isnt running 2023-10-24 15:08:50 -04:00
defensivedepth
310a6b4f27 Add kibana curl config 2023-10-24 14:21:01 -04:00
m0duspwnens
180ba3a958 if deb fam, stop salt-master and salt-minion after salt upgrade 2023-10-24 13:24:52 -04:00
m0duspwnens
6d3465626e if deb fam, stop salt-master and salt-minion after salt upgrade 2023-10-24 12:52:25 -04:00
m0duspwnens
fab91edd2d Merge remote-tracking branch 'origin/2.4/dev' into upgrade/salt3006.3 2023-10-24 09:41:23 -04:00
m0duspwnens
752390be2e merge with dev, fix confict 2023-10-24 09:40:09 -04:00
Mike Reeves
02639d3bc5 Merge pull request #11606 from Security-Onion-Solutions/TOoSmOotH-patch-2
Enable http2 for Suricata
2023-10-24 09:23:07 -04:00
Mike Reeves
4a3fc06a4d Enable http2 for Suricata 2023-10-24 09:18:10 -04:00
weslambert
0c2b3f3c62 Merge pull request #11600 from Security-Onion-Solutions/fix/suricata_pkt_src
Parse pkt_src for Suricata logs
2023-10-23 15:51:30 -04:00
weslambert
660020cc76 Parse pkt_src for Suricata logs 2023-10-23 15:45:41 -04:00
Jorge Reyes
b59a95b72f Merge pull request #11594 from Security-Onion-Solutions/fix/playbookrule
FIX: Add -watch to soctopus saltstate for file SOCtopus.conf. Makes contai…
2023-10-23 11:51:53 -04:00
reyesj2
030a667d26 Add -watch to soctopus saltstate for file SOCtopus.conf. Makes container restart @ highstate if file is updated.
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-10-23 11:47:14 -04:00
Josh Patterson
a40760e601 Merge pull request #11592 from Security-Onion-Solutions/minechanges
Minechanges
2023-10-23 10:37:05 -04:00
m0duspwnens
dc3ca99c12 ask the minion if it can see itself in the mine 2023-10-20 17:16:33 -04:00
m0duspwnens
7e3aa11a73 check mine is populated with ip before telling node to highstate 2023-10-20 16:27:20 -04:00
m0duspwnens
c409339446 change post setup highstate cron to 5 minutes since accepting minion runs a highstate 2023-10-20 13:46:24 -04:00
m0duspwnens
c588bf4395 update mine and highstate minion when added 2023-10-20 13:43:12 -04:00
m0duspwnens
6d77b1e4c3 continue loop if minion not in mine 2023-10-20 13:41:53 -04:00
m0duspwnens
99662c999f log operation and minion target 2023-10-20 13:41:24 -04:00
m0duspwnens
ef2b89f5bf fix attempts logic 2023-10-20 13:40:40 -04:00
Josh Patterson
2878f82754 Merge pull request #11582 from Security-Onion-Solutions/minechanges
handle a minion not being in the mine data return
2023-10-20 10:07:44 -04:00
m0duspwnens
2e16250c93 handle a minion not being in the mine data return 2023-10-20 10:00:39 -04:00
m0duspwnens
f03bbdbc09 Merge remote-tracking branch 'origin/2.4/dev' into upgrade/salt3006.3 2023-10-19 17:01:12 -04:00
m0duspwnens
dbfccdfff8 fix logging when using wait_for_minion 2023-10-19 16:53:03 -04:00
m0duspwnens
dfcbbfd157 update call to wait_for_salt_minion with new options in so-functions 2023-10-19 15:58:50 -04:00
m0duspwnens
37e803917e have soup wait_for_salt_minion() before running any highstate 2023-10-19 15:58:10 -04:00
m0duspwnens
66ee074795 add wait_for_salt_minion to so-common 2023-10-19 15:57:24 -04:00
m0duspwnens
90bde94371 handle debian family salt upgrade for soup 2023-10-19 13:46:48 -04:00
m0duspwnens
84f8e1cc92 debian family upgrade salt without -r flag 2023-10-19 13:46:07 -04:00
m0duspwnens
e3830fa286 all more os to set_os in so-common 2023-10-19 13:43:03 -04:00
m0duspwnens
13a5c8baa7 remove extra || 2023-10-19 11:19:51 -04:00
m0duspwnens
c5610edd83 handle salt for r9 and c9 2023-10-19 11:12:20 -04:00
weslambert
5119e6c45a Merge pull request #11570 from Security-Onion-Solutions/feature/additional_integrations
Additional integrations
2023-10-19 09:30:40 -04:00
m0duspwnens
02e22c87e8 Merge remote-tracking branch 'origin/2.4/dev' into upgrade/salt3006.3 2023-10-19 09:15:31 -04:00
Mike Reeves
0772926992 Merge pull request #11573 from Security-Onion-Solutions/minechanges 2023-10-18 19:45:23 -04:00
m0duspwnens
b2bb92d413 remove extra space 2023-10-18 19:38:19 -04:00
Mike Reeves
19bebe44aa Merge pull request #11572 from Security-Onion-Solutions/minechanges 2023-10-18 19:37:34 -04:00
m0duspwnens
f30a652e19 add back redirects 2023-10-18 19:31:45 -04:00
m0duspwnens
ff18b1f074 remove redirect 2023-10-18 18:45:14 -04:00
m0duspwnens
9eb682bc40 generate_ca after salt-master and salt-minion states run 2023-10-18 18:37:35 -04:00
Wes
c135f886a9 Remove Carbon Black Cloud integration 2023-10-18 20:41:34 +00:00
Wes
28b7a24cc1 Add templates for integrations 2023-10-18 20:36:04 +00:00
m0duspwnens
a52ee063e5 use generate_ca and generate_ssl functions and move them up 2023-10-18 16:35:33 -04:00
Wes
767a54c91b Add pkgs 2023-10-18 20:07:26 +00:00
m0duspwnens
ac28e1b967 verify crt and key differently in checkmine 2023-10-18 15:53:12 -04:00
Jorge Reyes
5e10a0d9e2 Merge pull request #11568 from Security-Onion-Solutions/2.4/zeek6
Add back plugin-tds/ plugin-profinet. Using patched versions for Zeek 6
2023-10-18 15:39:30 -04:00
reyesj2
dd28dc6ddd Add back plugin-tds/ plugin-profinet. Using patched versions for Zeek 6
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-10-18 15:30:32 -04:00
m0duspwnens
e58c1e189c use x509 instead of file for onchanges 2023-10-18 15:10:17 -04:00
m0duspwnens
1c1b23c328 fix mine update for ca 2023-10-18 15:07:18 -04:00
m0duspwnens
2206cdb0fa change soup comment 2023-10-18 15:04:39 -04:00
m0duspwnens
1999db0bb3 apply ca state early in setup 2023-10-18 15:02:22 -04:00
m0duspwnens
c3cde61202 docker service watches and requires the intca 2023-10-18 15:01:26 -04:00
m0duspwnens
8e68f96316 check that the manager has a ca in the mine and that it is valid 2023-10-18 13:59:15 -04:00
m0duspwnens
138aa9c554 update the mine with the ca when it is created or changed 2023-10-18 13:54:14 -04:00
weslambert
f0e380870d Merge pull request #11567 from Security-Onion-Solutions/fix/mhr_docs
Add note regarding DNS resolver
2023-10-18 13:46:25 -04:00
weslambert
34717fb65e Add note regarding DNS resolver 2023-10-18 13:44:09 -04:00
Josh Patterson
d81dfb99d0 Merge pull request #11563 from Security-Onion-Solutions/minechanges
Minechanges
2023-10-17 17:36:46 -04:00
m0duspwnens
fb9a0ab8b6 endif not fi in jinja 2023-10-17 17:33:53 -04:00
m0duspwnens
928fb23e96 only add node to pillar if returned ip from mine 2023-10-17 17:28:28 -04:00
m0duspwnens
d9862aefcf handle mine.p not being present. only check if mine_ip exists, dont compare to alived ip 2023-10-17 17:09:52 -04:00
m0duspwnens
496b97d706 handle the mine file not being present before checking the size 2023-10-17 15:42:42 -04:00
weslambert
830b5b9a21 Merge pull request #11560 from Security-Onion-Solutions/foxtrot
Elastic 8.10.4
2023-10-17 13:47:21 -04:00
weslambert
06e731c762 Update VERSION 2023-10-17 13:33:12 -04:00
weslambert
be2a829524 Elastic 8.10.4 2023-10-17 10:49:03 -04:00
weslambert
8cab242ad0 Elastic 8.10.4 2023-10-17 10:48:31 -04:00
weslambert
99054a2687 Elastic 8.10.4 2023-10-17 10:47:26 -04:00
weslambert
adcb7840bd Elastic 8.10.3 2023-10-17 10:38:20 -04:00
weslambert
8db6fef92d Elastic 8.10.3 2023-10-17 10:35:36 -04:00
weslambert
24329e3731 Update config_saved_objects.ndjson 2023-10-17 10:34:38 -04:00
weslambert
1db88bdbb5 Update so-common 2023-10-17 10:33:39 -04:00
weslambert
7c2cdb78e9 Update VERSION 2023-10-17 10:31:53 -04:00
Josh Patterson
e858a1211e Merge pull request #11558 from Security-Onion-Solutions/excludelogfp
mark suricata 7 log line as fp fo so-log-check
2023-10-17 10:02:21 -04:00
m0duspwnens
01cb0fccb6 mark suricata 7 log line as fp fo so-log-check 2023-10-17 10:01:11 -04:00
Josh Patterson
86394dab01 Merge pull request #11555 from Security-Onion-Solutions/minechanges
Minechanges
2023-10-16 17:32:16 -04:00
m0duspwnens
53fcafea50 redo how we check if salt-master is ready and accessible 2023-10-16 16:31:43 -04:00
Jorge Reyes
574a81da7f Merge pull request #11554 from Security-Onion-Solutions/2.4/zeek6
Zeek 6 upgrade
2023-10-16 15:52:48 -04:00
reyesj2
ed693a7ae6 Remove commented lines in defaults.yaml to avoid UI issues.
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-10-16 15:48:51 -04:00
reyesj2
e5c936e8cf Replace external zeek-community-id with builtin community-id. Disable plugin-tds + plugin-profinet. Not updated for Zeek 6.x
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-10-16 15:18:26 -04:00
m0duspwnens
9f3a9dfab0 reorder salt.master state 2023-10-16 15:00:53 -04:00
m0duspwnens
c0030bc513 dont need to restart minion service when just adding sleep delay on service start 2023-10-16 15:00:07 -04:00
m0duspwnens
a637b0e61b apply salt.master and minion state early in setup to prevent the services from restarting later in setup 2023-10-16 14:58:58 -04:00
Jason Ertel
2f0e673ec3 Merge pull request #11552 from Security-Onion-Solutions/jertel/auto
only add heavynodes to remoteHostUrls
2023-10-16 13:10:10 -04:00
Jason Ertel
84c39b5de7 only add heavynodes to remoteHostUrls 2023-10-16 13:01:13 -04:00
m0duspwnens
07902d17cc display container dl status during soup 2023-10-16 11:20:19 -04:00
m0duspwnens
1a7761c531 display container dl status during soup 2023-10-16 11:00:31 -04:00
m0duspwnens
2773da5a12 run the checkmine engine under master instead of minion 2023-10-16 10:34:45 -04:00
m0duspwnens
e23b3a62f3 default interval of 60s 2023-10-13 16:24:11 -04:00
m0duspwnens
57684efddf checkmine looks for 1 byte file and verify mine ip is correct 2023-10-13 16:23:16 -04:00
m0duspwnens
1641aa111b add checkmine back 2023-10-13 13:46:31 -04:00
Jason Ertel
ca2530e07f Merge pull request #11535 from Security-Onion-Solutions/jertel/auto
avoid rebooting when testing deb installs
2023-10-12 16:30:24 -04:00
Mike Reeves
104b53c6ec Merge pull request #11534 from Security-Onion-Solutions/TOoSmOotH-patch-1
Update HOTFIX
2023-10-12 16:20:37 -04:00
Mike Reeves
6c5f8e4e2d Update HOTFIX 2023-10-12 16:19:59 -04:00
Mike Reeves
b8d586addd Merge pull request #11533 from Security-Onion-Solutions/2.4/main
2.4/main
2023-10-12 16:19:29 -04:00
Mike Reeves
1b5cd4f53a Merge pull request #11532 from Security-Onion-Solutions/hotfix/2.4.20
Hotfix 2.4.20
2023-10-12 16:16:49 -04:00
m0duspwnens
d2002a5158 add additional comments 2023-10-12 15:58:33 -04:00
m0duspwnens
5250292e95 only allow stable install type. require -r to be used 2023-10-12 15:54:22 -04:00
Mike Reeves
acc6715f90 Merge pull request #11531 from Security-Onion-Solutions/2.4.20hf
2.4.20 hotfix
2023-10-12 15:52:44 -04:00
Mike Reeves
b6af59d9b0 2.4.20 hotfix 2023-10-12 15:47:53 -04:00
Jason Ertel
49a651fd72 adjust var name 2023-10-12 15:43:22 -04:00
m0duspwnens
2d688331df handle version install for stable and onedir install type 2023-10-12 15:32:04 -04:00
m0duspwnens
b12c4a96e9 remove files 2023-10-12 15:11:25 -04:00
m0duspwnens
6dd06c0fe9 change install_centos_onedir to install version provided from command line 2023-10-12 15:07:47 -04:00
Jason Ertel
17ae9b3349 avoid reboot during testing 2023-10-12 13:54:07 -04:00
m0duspwnens
8dc163f074 use script from develop branch 2023-10-12 13:09:07 -04:00
Josh Brower
8ce70e1f18 Merge pull request #11525 from Security-Onion-Solutions/hotfixfunctions
Apply named state
2023-10-12 11:05:32 -04:00
defensivedepth
98eab906af Apply named state 2023-10-12 11:00:24 -04:00
Josh Brower
d558f20715 Merge pull request #11524 from Security-Onion-Solutions/hotfixfunctions
Apply state correctly
2023-10-12 10:56:43 -04:00
defensivedepth
967138cdff Apply state correctly 2023-10-12 10:54:26 -04:00
Josh Brower
c76ac717f2 Merge pull request #11522 from Security-Onion-Solutions/hotfixfunctions
Add hotfix changes
2023-10-12 09:52:55 -04:00
defensivedepth
a671ac387a Add hotfix changes 2023-10-12 09:45:20 -04:00
m0duspwnens
ab4c5acd0c update bootstrap-salt.sh with stable branch 2023-10-12 09:28:07 -04:00
defensivedepth
1043315e6b Manage Elastic Defend Integration manually 2023-10-12 09:22:26 -04:00
m0duspwnens
d357864d69 fix upgrade_salt function for oel 2023-10-11 15:32:11 -04:00
Jason Ertel
44b855dd93 merge 2.4/dev 2023-10-11 13:35:16 -04:00
m0duspwnens
2094b4f688 upgrade to salt 3006.3 2023-10-11 09:04:36 -04:00
Josh Patterson
5252482fe3 Merge pull request #11503 from Security-Onion-Solutions/minechanges
Minechanges
2023-10-10 16:33:17 -04:00
m0duspwnens
abeebc7bc4 Merge remote-tracking branch 'origin/2.4/dev' into minechanges 2023-10-10 13:13:55 -04:00
m0duspwnens
4193130ed0 reduce salt mine interval to 25 minutes 2023-10-10 13:07:12 -04:00
m0duspwnens
89467adf9c batch the salt mine update 2023-10-10 13:05:43 -04:00
m0duspwnens
a283e7ea0b remove checkmine salt engine 2023-10-10 13:00:54 -04:00
Mike Reeves
a54479d603 Merge pull request #11497 from Security-Onion-Solutions/TOoSmOotH-patch-9
Update VERSION
2023-10-10 11:07:51 -04:00
Mike Reeves
49ebbf3232 Update VERSION 2023-10-10 11:05:39 -04:00
m0duspwnens
05da5c039c Merge remote-tracking branch 'origin/2.4/dev' into minechanges 2023-10-10 11:02:19 -04:00
Josh Patterson
f3d0248ec5 Merge pull request #11496 from Security-Onion-Solutions/fix/ping
accept icmp on input chain
2023-10-10 10:59:05 -04:00
m0duspwnens
4dc24b22c7 accept icmp on input chain 2023-10-10 10:51:59 -04:00
m0duspwnens
39ea1d317d add comment 2023-09-29 17:12:14 -04:00
m0duspwnens
827ed7b273 run salt.mine_function state locally and provide pillar info to it 2023-09-29 17:08:42 -04:00
m0duspwnens
8690304dff change how mine_functions.conf is managed during setup 2023-09-29 16:17:19 -04:00
m0duspwnens
1e327c143c Merge remote-tracking branch 'origin/2.4/dev' into minechanges 2023-09-29 15:11:06 -04:00
m0duspwnens
ad01be66ea remove checkmine engine. add x509.get_pem_entries to managers mine_functions. simplify mine update during soup 2023-09-29 14:09:04 -04:00
Jason Ertel
5c7c3fb996 avoid rare false positive when dasbhoard load completes during setup 2023-07-31 16:09:36 -04:00
Jason Ertel
f4907a5b5c Merge branch '2.4/dev' into kilo 2023-07-28 14:15:14 -04:00
Jason Ertel
a5c4783564 oidc 2023-07-27 18:36:50 -04:00
Jason Ertel
d3e83d154b Merge branch '2.4/t dev' into kilo 2023-07-27 10:20:22 -04:00
Jason Ertel
aa36e9a785 oidc 2023-07-27 08:40:27 -04:00
Jason Ertel
b712d505f2 update version to use kilo images 2023-07-26 09:21:23 -04:00
Jason Ertel
6d56deb2e4 oidc 1 2023-07-25 08:12:45 -04:00
Jason Ertel
101e2e8ba1 do not redirect to API URLs when not logged in 2023-07-24 17:05:52 -04:00
Jason Ertel
83bff72cd4 Merge branch '2.4/dev' into kilo 2023-07-18 10:49:12 -04:00
Jason Ertel
b24afac0f4 upgrade registry version 2023-07-18 10:48:42 -04:00
Jason Ertel
b129b4ceaa prepare for alt login 2023-07-14 17:03:20 -04:00
101 changed files with 12751 additions and 5638 deletions

View File

@@ -4,9 +4,11 @@ on:
push:
paths:
- "salt/sensoroni/files/analyzers/**"
- "salt/manager/tools/sbin"
pull_request:
paths:
- "salt/sensoroni/files/analyzers/**"
- "salt/manager/tools/sbin"
jobs:
build:
@@ -16,7 +18,7 @@ jobs:
fail-fast: false
matrix:
python-version: ["3.10"]
python-code-path: ["salt/sensoroni/files/analyzers"]
python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]
steps:
- uses: actions/checkout@v3
@@ -34,4 +36,4 @@ jobs:
flake8 ${{ matrix.python-code-path }} --show-source --max-complexity=12 --doctests --max-line-length=200 --statistics
- name: Test with pytest
run: |
pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=${{ matrix.python-code-path }}/pytest.ini
pytest ${{ matrix.python-code-path }} --cov=${{ matrix.python-code-path }} --doctest-modules --cov-report=term --cov-fail-under=100 --cov-config=pytest.ini

View File

@@ -1,18 +1,18 @@
### 2.4.20-20231006 ISO image released on 2023/10/06
### 2.4.30-20231121 ISO image released on 2023/11/21
### Download and Verify
2.4.20-20231006 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso
2.4.30-20231121 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231121.iso
MD5: 269F00308C53976BF0EAE788D1DB29DB
SHA1: 3F7C2324AE1271112F3B752BA4724AF36688FC27
SHA256: 542B8B3F4F75AD24DC78007F8FE0857E00DC4CC9F4870154DCB8D5D0C4144B65
MD5: 09DB0A6B3A75435C855E777272FC03F8
SHA1: A68868E67A3F86B77E01F54067950757EFD3BA72
SHA256: B3880C0302D9CDED7C974585B14355544FC9C3279F952EC79FC2BA9AEC7CB749
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231121.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.30-20231121.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.30-20231121.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.20-20231006.iso.sig securityonion-2.4.20-20231006.iso
gpg --verify securityonion-2.4.30-20231121.iso.sig securityonion-2.4.30-20231121.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Tue 03 Oct 2023 11:40:51 AM EDT using RSA key ID FE507013
gpg: Signature made Tue 21 Nov 2023 01:21:38 PM EST using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.

2
HOTFIX
View File

@@ -1 +1 @@
20231121

View File

@@ -1 +1 @@
2.4.20
2.4.30

View File

@@ -12,7 +12,6 @@ role:
eval:
fleet:
heavynode:
helixsensor:
idh:
import:
manager:

View File

@@ -7,19 +7,23 @@
tgt_type='compound') | dictsort()
%}
{% set hostname = cached_grains[minionid]['host'] %}
{% set node_type = minionid.split('_')[1] %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% set hostname = cached_grains[minionid]['host'] %}
{% set node_type = minionid.split('_')[1] %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: ip[0]}}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: ip[0]}) %}
{% else %}
{% do node_types[node_type][hostname].update(ip[0]) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}
logstash:
nodes:
{% for node_type, values in node_types.items() %}

View File

@@ -4,18 +4,22 @@
{% set hostname = minionid.split('_')[0] %}
{% set node_type = minionid.split('_')[1] %}
{% set is_alive = False %}
{% if minionid in manage_alived.keys() %}
{% if ip[0] == manage_alived[minionid] %}
{% set is_alive = True %}
# only add a node to the pillar if it returned an ip from the mine
{% if ip | length > 0%}
{% if minionid in manage_alived.keys() %}
{% if ip[0] == manage_alived[minionid] %}
{% set is_alive = True %}
{% endif %}
{% endif %}
{% endif %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %}
{% else %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %}
{% if node_type not in node_types.keys() %}
{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %}
{% else %}
{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %}
{% if hostname not in node_types[node_type] %}
{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %}
{% else %}
{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %}
{% endif %}
{% endif %}
{% endif %}
{% endfor %}

View File

@@ -1,44 +0,0 @@
thresholding:
sids:
8675309:
- threshold:
gen_id: 1
type: threshold
track: by_src
count: 10
seconds: 10
- threshold:
gen_id: 1
type: limit
track: by_dst
count: 100
seconds: 30
- rate_filter:
gen_id: 1
track: by_rule
count: 50
seconds: 30
new_action: alert
timeout: 30
- suppress:
gen_id: 1
track: by_either
ip: 10.10.3.7
11223344:
- threshold:
gen_id: 1
type: limit
track: by_dst
count: 10
seconds: 10
- rate_filter:
gen_id: 1
track: by_src
count: 50
seconds: 20
new_action: pass
timeout: 60
- suppress:
gen_id: 1
track: by_src
ip: 10.10.3.0/24

View File

@@ -1,20 +0,0 @@
thresholding:
sids:
<signature id>:
- threshold:
gen_id: <generator id>
type: <threshold | limit | both>
track: <by_src | by_dst>
count: <count>
seconds: <seconds>
- rate_filter:
gen_id: <generator id>
track: <by_src | by_dst | by_rule | by_both>
count: <count>
seconds: <seconds>
new_action: <alert | pass>
timeout: <seconds>
- suppress:
gen_id: <generator id>
track: <by_src | by_dst | by_either>
ip: <ip | subnet>

26
pyci.sh Executable file
View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
if [[ $# -ne 1 ]]; then
echo "Usage: $0 <python_script_dir>"
echo "Runs tests on all *_test.py files in the given directory."
exit 1
fi
HOME_DIR=$(dirname "$0")
TARGET_DIR=${1:-.}
PATH=$PATH:/usr/local/bin
if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
echo "Missing dependencies. Consider running the following command:"
echo " python -m pip install flake8 pytest pytest-cov"
exit 1
fi
pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"

10
salt/bpf/macros.jinja Normal file
View File

@@ -0,0 +1,10 @@
{% macro remove_comments(bpfmerged, app) %}
{# remove comments from the bpf #}
{% for bpf in bpfmerged[app] %}
{% if bpf.strip().startswith('#') %}
{% do bpfmerged[app].pop(loop.index0) %}
{% endif %}
{% endfor %}
{% endmacro %}

View File

@@ -1,4 +1,7 @@
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
{% set PCAPBPF = BPFMERGED.pcap %}

View File

@@ -1,4 +1,7 @@
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'suricata') }}
{% set SURICATABPF = BPFMERGED.suricata %}

View File

@@ -1,4 +1,7 @@
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
{% import 'bpf/macros.jinja' as MACROS %}
{{ MACROS.remove_comments(BPFMERGED, 'zeek') }}
{% set ZEEKBPF = BPFMERGED.zeek %}

View File

@@ -37,7 +37,7 @@ x509_signing_policies:
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "critical keyEncipherment"
- keyUsage: "critical keyEncipherment digitalSignature"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- extendedKeyUsage: serverAuth

View File

@@ -50,6 +50,12 @@ pki_public_ca_crt:
attempts: 5
interval: 30
mine_update_ca_crt:
module.run:
- mine.update: []
- onchanges:
- x509: pki_public_ca_crt
cakeyperms:
file.managed:
- replace: False

View File

@@ -8,6 +8,7 @@ include:
- common.packages
{% if GLOBALS.role in GLOBALS.manager_roles %}
- manager.elasticsearch # needed for elastic_curl_config state
- manager.kibana
{% endif %}
net.core.wmem_default:

View File

@@ -8,7 +8,7 @@
# Elastic agent is not managed by salt. Because of this we must store this base information in a
# script that accompanies the soup system. Since so-common is one of those special soup files,
# and since this same logic is required during installation, it's included in this file.
ELASTIC_AGENT_TARBALL_VERSION="8.8.2"
ELASTIC_AGENT_TARBALL_VERSION="8.10.4"
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
@@ -133,22 +133,37 @@ check_elastic_license() {
}
check_salt_master_status() {
local timeout=$1
echo "Checking if we can talk to the salt master"
salt-call state.show_top concurrent=true
return
local count=0
local attempts="${1:- 10}"
current_time="$(date '+%b %d %H:%M:%S')"
echo "Checking if we can access the salt master and that it is ready at: ${current_time}"
while ! salt-call state.show_top -l error concurrent=true 1> /dev/null; do
current_time="$(date '+%b %d %H:%M:%S')"
echo "Can't access salt master or it is not ready at: ${current_time}"
((count+=1))
if [[ $count -eq $attempts ]]; then
# 10 attempts takes about 5.5 minutes
echo "Gave up trying to access salt-master"
return 1
fi
done
current_time="$(date '+%b %d %H:%M:%S')"
echo "Successfully accessed and salt master ready at: ${current_time}"
return 0
}
# this is only intended to be used to check the status of the minion from a salt master
check_salt_minion_status() {
local timeout=$1
echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1
salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1
local minion="$1"
local timeout="${2:-5}"
local logfile="${3:-'/dev/stdout'}"
echo "Checking if the salt minion: $minion will respond to jobs" >> "$logfile" 2>&1
salt "$minion" test.ping -t $timeout > /dev/null 2>&1
local status=$?
if [ $status -gt 0 ]; then
echo " Minion did not respond" >> "$setup_log" 2>&1
echo " Minion did not respond" >> "$logfile" 2>&1
else
echo " Received job response from salt minion" >> "$setup_log" 2>&1
echo " Received job response from salt minion" >> "$logfile" 2>&1
fi
return $status
@@ -382,6 +397,10 @@ retry() {
echo "<Start of output>"
echo "$output"
echo "<End of output>"
if [[ $exitcode -eq 0 ]]; then
echo "Forcing exit code to 1"
exitcode=1
fi
fi
elif [ -n "$failedOutput" ]; then
if [[ "$output" =~ "$failedOutput" ]]; then
@@ -390,7 +409,7 @@ retry() {
echo "$output"
echo "<End of output>"
if [[ $exitcode -eq 0 ]]; then
echo "The exitcode was 0, but we are setting to 1 since we found $failedOutput in the output."
echo "Forcing exit code to 1"
exitcode=1
fi
else
@@ -428,6 +447,24 @@ run_check_net_err() {
fi
}
wait_for_salt_minion() {
local minion="$1"
local timeout="${2:-5}"
local logfile="${3:-'/dev/stdout'}"
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail
local attempt=0
# each attempts would take about 15 seconds
local maxAttempts=20
until check_salt_minion_status "$minion" "$timeout" "$logfile"; do
attempt=$((attempt+1))
if [[ $attempt -eq $maxAttempts ]]; then
return 1
fi
sleep 10
done
return 0
}
salt_minion_count() {
local MINIONDIR="/opt/so/saltstack/local/pillar/minions"
MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l)
@@ -440,19 +477,51 @@ set_os() {
OS=rocky
OSVER=9
is_rocky=true
is_rpm=true
elif grep -q "CentOS Stream release 9" /etc/redhat-release; then
OS=centos
OSVER=9
is_centos=true
elif grep -q "Oracle Linux Server release 9" /etc/system-release; then
OS=oel
is_rpm=true
elif grep -q "AlmaLinux release 9" /etc/redhat-release; then
OS=alma
OSVER=9
is_oracle=true
is_alma=true
is_rpm=true
elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then
if [ -f /etc/oracle-release ]; then
OS=oracle
OSVER=9
is_oracle=true
is_rpm=true
else
OS=rhel
OSVER=9
is_rhel=true
is_rpm=true
fi
fi
cron_service_name="crond"
else
OS=ubuntu
is_ubuntu=true
elif [ -f /etc/os-release ]; then
if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
OSVER=focal
UBVER=20.04
OS=ubuntu
is_ubuntu=true
is_deb=true
elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then
OSVER=jammy
UBVER=22.04
OS=ubuntu
is_ubuntu=true
is_deb=true
elif grep -q "VERSION_CODENAME=bookworm" /etc/os-release; then
OSVER=bookworm
DEBVER=12
is_debian=true
OS=debian
is_deb=true
fi
cron_service_name="cron"
fi
}
@@ -486,6 +555,10 @@ set_version() {
fi
}
status () {
printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n"
}
systemctl_func() {
local action=$1
local echo_action=$1

View File

@@ -137,7 +137,7 @@ update_docker_containers() {
for i in "${TRUSTED_CONTAINERS[@]}"
do
if [ -z "$PROGRESS_CALLBACK" ]; then
echo "Downloading $i" >> "$LOG_FILE" 2>&1
echo "Downloading $i" >> "$LOG_FILE" 2>&1
else
$PROGRESS_CALLBACK $i
fi

View File

@@ -114,6 +114,11 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to poll" # server not yet ready (sensoroni waiting on soc)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|minions returned with non" # server not yet ready (salt waiting on minions)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # server not yet ready (influxdb not yet setup)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|search_phase_execution_exception" # server not yet ready (elastalert running searches before ES is ready)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving docker" # Telegraf unable to reach Docker engine, rare
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving container" # Telegraf unable to reach Docker engine, rare
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines
fi
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
@@ -136,6 +141,8 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content)
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. app_layer.error.imap.parser | Total | 0
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses)
fi
if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
@@ -157,6 +164,9 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." # known ingest type collisions issue with earlier versions of SO
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to determine destination index stats" # Elastic transform temporary error
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets"
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed"
@@ -230,4 +240,4 @@ else
echo -e "\nResult: One or more errors found"
fi
exit $RESULT
exit $RESULT

View File

@@ -1,67 +0,0 @@
#!/bin/bash
local_salt_dir=/opt/so/saltstack/local
zeek_logs_enabled() {
echo "zeeklogs:" > $local_salt_dir/pillar/zeeklogs.sls
echo " enabled:" >> $local_salt_dir/pillar/zeeklogs.sls
for BLOG in "${BLOGS[@]}"; do
echo " - $BLOG" | tr -d '"' >> $local_salt_dir/pillar/zeeklogs.sls
done
}
whiptail_manager_adv_service_zeeklogs() {
BLOGS=$(whiptail --title "so-zeek-logs" --checklist "Please Select Logs to Send:" 24 78 12 \
"conn" "Connection Logging" ON \
"dce_rpc" "RPC Logs" ON \
"dhcp" "DHCP Logs" ON \
"dnp3" "DNP3 Logs" ON \
"dns" "DNS Logs" ON \
"dpd" "DPD Logs" ON \
"files" "Files Logs" ON \
"ftp" "FTP Logs" ON \
"http" "HTTP Logs" ON \
"intel" "Intel Hits Logs" ON \
"irc" "IRC Chat Logs" ON \
"kerberos" "Kerberos Logs" ON \
"modbus" "MODBUS Logs" ON \
"notice" "Zeek Notice Logs" ON \
"ntlm" "NTLM Logs" ON \
"pe" "PE Logs" ON \
"radius" "Radius Logs" ON \
"rfb" "RFB Logs" ON \
"rdp" "RDP Logs" ON \
"sip" "SIP Logs" ON \
"smb_files" "SMB Files Logs" ON \
"smb_mapping" "SMB Mapping Logs" ON \
"smtp" "SMTP Logs" ON \
"snmp" "SNMP Logs" ON \
"ssh" "SSH Logs" ON \
"ssl" "SSL Logs" ON \
"syslog" "Syslog Logs" ON \
"tunnel" "Tunnel Logs" ON \
"weird" "Zeek Weird Logs" ON \
"mysql" "MySQL Logs" ON \
"socks" "SOCKS Logs" ON \
"x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 )
local exitstatus=$?
IFS=' ' read -ra BLOGS <<< "$BLOGS"
return $exitstatus
}
whiptail_manager_adv_service_zeeklogs
return_code=$?
case $return_code in
1)
whiptail --title "so-zeek-logs" --msgbox "Cancelling. No changes have been made." 8 75
;;
255)
whiptail --title "so-zeek-logs" --msgbox "Whiptail error occured, exiting." 8 75
;;
*)
zeek_logs_enabled
;;
esac

View File

@@ -346,7 +346,6 @@ desktop_packages:
- snappy
- sound-theme-freedesktop
- soundtouch
- securityonion-networkminer
- speech-dispatcher
- speech-dispatcher-espeak-ng
- speex
@@ -433,6 +432,10 @@ desktop_packages:
- xorg-x11-xinit-session
- zip
install_networkminer:
pkg.latest:
- name: securityonion-networkminer
{% else %}
desktop_packages_os_fail:

View File

@@ -6,6 +6,9 @@
{% from 'docker/docker.map.jinja' import DOCKER %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# include ssl since docker service requires the intca
include:
- ssl
dockergroup:
group.present:
@@ -86,6 +89,11 @@ docker_running:
- enable: True
- watch:
- file: docker_daemon
- x509: trusttheca
- require:
- file: docker_daemon
- x509: trusttheca
# Reserve OS ports for Docker proxy in case boot settings are not already applied/present
# 57314 = Strelka, 47760-47860 = Zeek

View File

@@ -6,6 +6,7 @@
{% from 'allowed_states.map.jinja' import allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if sls.split('.')[0] in allowed_states %}
{% set node_data = salt['pillar.get']('node_data') %}
# Add EA Group
elasticfleetgroup:
@@ -67,6 +68,7 @@ eapackageupgrade:
- source: salt://elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade
- user: 947
- group: 939
- mode: 755
- template: jinja
{% if GLOBALS.role != "so-fleet" %}
@@ -92,13 +94,53 @@ eaintegration:
- user: 947
- group: 939
eaoptionalintegrationsdir:
file.directory:
- name: /opt/so/conf/elastic-fleet/integrations-optional
- user: 947
- group: 939
- makedirs: True
{% for minion in node_data %}
{% set role = node_data[minion]["role"] %}
{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %}
{% set optional_integrations = salt['pillar.get']('elasticfleet:optional_integrations', {}) %}
{% set integration_keys = salt['pillar.get']('elasticfleet:optional_integrations', {}).keys() %}
fleet_server_integrations_{{ minion }}:
file.directory:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}
- user: 947
- group: 939
- makedirs: True
{% for integration in integration_keys %}
{% if 'enabled_nodes' in optional_integrations[integration]%}
{% set enabled_nodes = optional_integrations[integration]["enabled_nodes"] %}
{% if minion in enabled_nodes %}
optional_integrations_dynamic_{{ minion }}_{{ integration }}:
file.managed:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}/{{ integration }}.json
- source: salt://elasticfleet/files/integrations-optional/{{ integration }}.json
- user: 947
- group: 939
- template: jinja
- defaults:
NAME: {{ minion }}
{% else %}
optional_integrations_dynamic_{{ minion }}_{{ integration }}_delete:
file.absent:
- name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}/{{ integration }}.json
{% endif %}
{% endif %}
{% endfor %}
{% endif %}
{% endfor %}
ea-integrations-load:
file.absent:
- name: /opt/so/state/eaintegrations.txt
- onchanges:
- file: eaintegration
- file: eadynamicintegration
- file: eapackageupgrade
- file: /opt/so/conf/elastic-fleet/integrations-optional/*
{% endif %}
{% else %}

View File

@@ -30,18 +30,26 @@ elasticfleet:
packages:
- apache
- auditd
- auth0
- aws
- azure
- barracuda
- carbonblack_edr
- checkpoint
- cisco_asa
- cisco_duo
- cisco_meraki
- cisco_umbrella
- cloudflare
- crowdstrike
- darktrace
- elastic_agent
- elasticsearch
- endpoint
- f5_bigip
- fleet_server
- fim
- fireeye
- fleet_server
- fortinet
- fortinet_fortigate
- gcp
@@ -57,24 +65,38 @@ elasticfleet:
- m365_defender
- microsoft_defender_endpoint
- microsoft_dhcp
- mimecast
- netflow
- o365
- okta
- osquery_manager
- panw
- pfsense
- pulse_connect_secure
- redis
- sentinel_one
- snyk
- sonicwall_firewall
- sophos
- sophos_central
- symantec_endpoint
- system
- tcp
- tenable_sc
- ti_abusech
- ti_misp
- ti_otx
- ti_recordedfuture
- udp
- vsphere
- windows
- zscaler_zia
- zscaler_zpa
- 1password
optional_integrations:
sublime_platform:
enabled_nodes: []
api_key:
base_url: https://api.platform.sublimesecurity.com
poll_interval: 5m
limit: 100

View File

@@ -96,6 +96,17 @@ so-elastic-fleet:
{% endif %}
{% if GLOBALS.role != "so-fleet" %}
so-elastic-fleet-package-statefile:
file.managed:
- name: /opt/so/state/elastic_fleet_packages.txt
- contents: {{ELASTICFLEETMERGED.packages}}
so-elastic-fleet-package-upgrade:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-package-upgrade
- onchanges:
- file: /opt/so/state/elastic_fleet_packages.txt
so-elastic-fleet-integrations:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-integration-policy-load

View File

@@ -0,0 +1,44 @@
{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED -%}
{%- from 'sensoroni/map.jinja' import SENSORONIMERGED -%}
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- raw -%}
{
"package": {
"name": "httpjson",
"version": ""
},
"name": "sublime-platform",
"namespace": "default",
"description": "",
"policy_id": "FleetServer_{%- endraw -%}{{ NAME }}{%- raw -%}",
"vars": {},
"inputs": {
"generic-httpjson": {
"enabled": true,
"streams": {
"httpjson.generic": {
"enabled": true,
"vars": {
"request_method": "GET",
"processors": "- drop_event:\n when:\n not:\n contains: \n message: \"flagged_rules\"\n- decode_json_fields:\n fields: [\"message\"]\n document_id: id\n target: \"\"",
"enable_request_tracer": false,
"oauth_scopes": [],
"request_transforms": "- set:\n target: header.Authorization\n value: 'Bearer {% endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.api_key }}{%- raw -%}'\n- set:\n target: header.accept\n value: application/json\n- set:\n target: url.params.last_message_created_at[gte]\n value: '[[formatDate (now (parseDuration \"-{%- endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.poll_interval }}{%- raw -%}\")) \"2006-01-02T15:04:05Z\"]]'\n- set:\n target: url.params.reviewed\n value: false\n- set:\n target: url.params.flagged\n value: true\n- set:\n target: url.params.limit\n value: {% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.limit }}{%- raw -%}",
"response_transforms": "",
"request_redirect_headers_ban_list": [],
"request_encode_as": "application/x-www-form-urlencoded",
"request_url": "{%- endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.base_url }}{%- raw -%}/v0/message-groups",
"response_split": "target: body.message_groups\ntype: array\nkeep_parent: false\ntransforms:\n - set:\n target: body.sublime.request_url\n value : '[[ .last_response.url.value ]]'",
"tags": [
"forwarded"
],
"pipeline": "sublime",
"data_stream.dataset": "sublime",
"request_interval": "1m"
}
}
}
}
}
}
{%- endraw -%}

View File

@@ -5,7 +5,7 @@
"package": {
"name": "endpoint",
"title": "Elastic Defend",
"version": "8.8.0"
"version": "8.10.2"
},
"enabled": true,
"policy_id": "endpoints-initial",

View File

@@ -40,3 +40,36 @@ elasticfleet:
helpLink: elastic-fleet.html
sensitive: True
advanced: True
optional_integrations:
sublime_platform:
enabled_nodes:
description: Fleet nodes with the Sublime Platform integration enabled. Enter one per line.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: "[]string"
api_key:
description: API key for Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
sensitive: True
base_url:
description: Base URL for Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
poll_interval:
description: Poll interval for alerts from Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: string
limit:
description: The maximum number of message groups to return from Sublime Platform.
global: True
helpLink: elastic-fleet.html
advanced: True
forcedType: int

31
salt/elasticfleet/tools/sbin/so-elastic-fleet-common Executable file → Normal file
View File

@@ -42,6 +42,23 @@ elastic_fleet_integration_create() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
# Remove an integration (package policy) from a Fleet agent policy.
# Arguments:
#   $1 - agent policy ID to remove the integration from
#   $2 - integration (package policy) name as shown in Fleet
# Looks up the integration's ID within the policy, then calls the Kibana
# Fleet API bulk-delete endpoint with that single ID.
elastic_fleet_integration_remove() {
AGENT_POLICY=$1
NAME=$2
# Resolve the package policy ID by matching on name within the agent policy.
INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$AGENT_POLICY" | jq -r '.item.package_policies[] | select(.name=="'"$NAME"'") | .id')
# Build {"packagePolicyIds":["<id>"]} safely via jq --arg (handles quoting).
JSON_STRING=$( jq -n \
--arg INTEGRATIONID "$INTEGRATION_ID" \
'{"packagePolicyIds":[$INTEGRATIONID]}'
)
# Fleet API: POST /api/fleet/package_policies/delete removes the listed policies.
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
elastic_fleet_integration_update() {
UPDATE_ID=$1
@@ -51,6 +68,19 @@ elastic_fleet_integration_update() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/package_policies/$UPDATE_ID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
# Upgrade an integration (package policy) to the latest installed package version.
# Arguments:
#   $1 - package policy ID to upgrade
# Calls the Kibana Fleet API bulk-upgrade endpoint with a single-element list.
elastic_fleet_integration_policy_upgrade() {
INTEGRATION_ID=$1
# Build {"packagePolicyIds":["<id>"]} safely via jq --arg (handles quoting).
JSON_STRING=$( jq -n \
--arg INTEGRATIONID "$INTEGRATION_ID" \
'{"packagePolicyIds":[$INTEGRATIONID]}'
)
# Fleet API: POST /api/fleet/package_policies/upgrade upgrades the listed policies.
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
elastic_fleet_package_version_check() {
PACKAGE=$1
curl -s -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X GET "localhost:5601/api/fleet/epm/packages/$PACKAGE" | jq -r '.item.version'
@@ -98,3 +128,4 @@ elastic_fleet_policy_update() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}

View File

@@ -0,0 +1,23 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Usage: Run with --force to update the Elastic Defend integration policy

# Provides elastic_fleet_integration_check / _create / _policy_upgrade helpers.
. /usr/sbin/so-elastic-fleet-common

# Manage Elastic Defend Integration for Initial Endpoints Policy.
# For each bundled Elastic Defend policy JSON: if the integration already
# exists in the "endpoints-initial" policy, upgrade it in place; otherwise
# create it from the JSON file.
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json
do
    # Use %s args rather than expanding variables inside the printf format
    # string (avoids misinterpreting '%' or '\' sequences in file names).
    printf '\n\nInitial Endpoints Policy - Loading %s\n' "$INTEGRATION"
    # Sets INTEGRATION_ID (and NAME) when the integration exists in the policy.
    elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION"
    if [ -n "$INTEGRATION_ID" ]; then
        printf '\n\nIntegration %s exists - Upgrading integration policy\n' "$NAME"
        elastic_fleet_integration_policy_upgrade "$INTEGRATION_ID"
    else
        printf '\n\nIntegration does not exist - Creating integration\n'
        elastic_fleet_integration_create "@$INTEGRATION"
    fi
done

View File

@@ -12,6 +12,9 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
# First, check for any package upgrades
/usr/sbin/so-elastic-fleet-package-upgrade
# Second, configure Elastic Defend Integration separately
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
# Initial Endpoints
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json
do
@@ -61,7 +64,28 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
# Fleet Server - Optional integrations
for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json
do
if ! [ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then
FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7`
printf "\n\nFleet Server Policy - Loading $INTEGRATION\n"
elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION"
if [ -n "$INTEGRATION_ID" ]; then
printf "\n\nIntegration $NAME exists - Updating integration\n"
elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION"
else
printf "\n\nIntegration does not exist - Creating integration\n"
if [ "$NAME" != "elasticsearch-logs" ]; then
elastic_fleet_integration_create "@$INTEGRATION"
fi
fi
fi
done
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/eaintegrations.txt
fi
else
exit $RETURN_CODE
fi

View File

@@ -8,8 +8,19 @@
INTCA=/etc/pki/tls/certs/intca.crt
. /usr/sbin/so-common
. /usr/sbin/so-elastic-fleet-common
# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
printf "Kibana API not accessible, exiting Elastic Fleet setup..."
exit 1
fi
printf "\n### Create ES Token ###\n"
ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value)
@@ -120,3 +131,4 @@ salt-call state.apply elasticfleet queue=True
# Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers
salt-call state.apply elasticfleet.install_agent_grid queue=True
exit 0

View File

@@ -20,20 +20,12 @@
{% for NODE in ES_LOGSTASH_NODES %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %}
{% endfor %}
{% if grains.id.split('_') | last == 'manager' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master','data','remote_cluster_client','transform']}) %}
{% else %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data_hot', 'remote_cluster_client']}) %}
{% endif %}
{% endif %}
{% elif grains.id.split('_') | last == 'searchnode' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['data_hot', 'ingest']}) %}
{% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %}
{% endif %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %}
{% elif grains.id.split('_') | last == 'heavynode' %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data', 'remote_cluster_client', 'ingest']}) %}
{% endif %}
{% if HIGHLANDER %}
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %}
@@ -53,3 +45,5 @@
{% endif %}
{% endfor %}
{% endif %}
{% do ELASTICSEARCHMERGED.config.node.update({'roles': ELASTICSEARCHMERGED.so_roles[GLOBALS.role].config.node.roles}) %}

File diff suppressed because it is too large Load Diff

View File

@@ -110,7 +110,7 @@ escomponenttemplates:
- group: 939
- clean: True
- onchanges_in:
- cmd: so-elasticsearch-templates
- file: so-elasticsearch-templates-reload
# Auto-generate templates from defaults file
{% for index, settings in ES_INDEX_SETTINGS.items() %}
@@ -123,7 +123,7 @@ es_index_template_{{index}}:
TEMPLATE_CONFIG: {{ settings.index_template }}
- template: jinja
- onchanges_in:
- cmd: so-elasticsearch-templates
- file: so-elasticsearch-templates-reload
{% endif %}
{% endfor %}
@@ -142,7 +142,7 @@ es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
- user: 930
- group: 939
- onchanges_in:
- cmd: so-elasticsearch-templates
- file: so-elasticsearch-templates-reload
{% endfor %}
{% endif %}
@@ -167,6 +167,10 @@ so-elasticsearch-ilm-policy-load:
- onchanges:
- file: so-elasticsearch-ilm-policy-load-script
so-elasticsearch-templates-reload:
file.absent:
- name: /opt/so/state/estemplates.txt
so-elasticsearch-templates:
cmd.run:
- name: /usr/sbin/so-elasticsearch-templates-load

View File

@@ -0,0 +1,34 @@
{
"description" : " Email alerts from Sublime",
"processors" : [
{ "set": { "field": "event.module", "value": "sublime" } },
{ "set": { "field": "event.dataset", "value": "alert" } },
{ "set": { "field": "event.severity", "value": 3, "override": true } },
{ "set": { "field": "rule.name", "value": "Sublime Platform: {{ flagged_rules.0.name }}", "override": true } },
{ "set": { "field": "sublime.message_group_id", "value": "{{ _id }}", "override": true } },
{ "set": { "field": "email.address", "value": "{{ messages.0.recipients.0.email }}", "override": true } },
{ "set": { "field": "email.forwarded_recipents", "value": "{{ messages.0.forwarded_receipients }}", "override": true } },
{ "set": { "field": "email.sender.address", "value": "{{ messages.0.sender.email }}", "override": true } },
{ "set": { "field": "email.subject", "value": "{{ messages.0.subject }}", "override": true } },
{ "set": { "field": "email.forwarded_at", "value": "{{ messages.0.forwarded_at }}", "override": true } },
{ "set": { "field": "email.created_at", "value": "{{ messages.0.created_at }}", "override": true } },
{ "set": { "field": "email.read_at", "value": "{{ messages.0.read_at }}", "override": true } },
{ "set": { "field": "email.replied_at", "value": "{{ messages.0.replied_at }}", "override": true } },
{
"grok": {
"field": "sublime.request_url",
"patterns": ["^https://api.%{DATA:sublime_host}/v0%{GREEDYDATA}$"],
"ignore_failure": true
}
},
{ "rename": { "field": "sublime_host", "target_field": "sublime.url", "ignore_missing": true } },
{ "rename": { "field": "data", "target_field": "sublime", "ignore_missing": true } },
{ "rename": { "field": "flagged_rules", "target_field": "sublime.flagged_rules", "ignore_missing": true } },
{ "rename": { "field": "organization_id", "target_field": "sublime.organization_id", "ignore_missing": true } },
{ "rename": { "field": "review_status", "target_field": "sublime.review_status", "ignore_missing": true } },
{ "rename": { "field": "state", "target_field": "sublime.state", "ignore_missing": true } },
{ "rename": { "field": "user_reports", "target_field": "sublime.user_reports", "ignore_missing": true } },
{ "pipeline": { "name": "common" } }
]
}

View File

@@ -2,6 +2,7 @@
"description" : "suricata.common",
"processors" : [
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
{ "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } },
{ "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } },
{ "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } },
{ "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } },

View File

@@ -33,7 +33,6 @@ elasticsearch:
flood_stage:
description: The max percentage of used disk space that will cause the node to take protective actions, such as blocking incoming events.
helpLink: elasticsearch.html
script:
max_compilations_rate:
description: Max rate of script compilations permitted in the Elasticsearch cluster. Larger values will consume more resources.
@@ -57,32 +56,6 @@ elasticsearch:
forcedType: int
global: True
helpLink: elasticsearch.html
so-logs: &indexSettings
index_sorting:
description: Sorts the index by event time, at the cost of additional processing resource consumption.
global: True
helpLink: elasticsearch.html
index_template:
index_patterns:
description: Patterns for matching multiple indices or tables.
forcedType: "[]string"
multiline: True
global: True
helpLink: elasticsearch.html
template:
settings:
index:
number_of_replicas:
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
forcedType: int
global: True
helpLink: elasticsearch.html
mapping:
total_fields:
limit:
description: Max number of fields that can exist on a single index. Larger values will consume more resources.
global: True
helpLink: elasticsearch.html
refresh_interval:
description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation.
global: True
@@ -100,48 +73,10 @@ elasticsearch:
description: The order to sort by. Must set index_sorting to True.
global: True
helpLink: elasticsearch.html
mappings:
_meta:
package:
name:
description: Meta settings for the mapping.
global: True
helpLink: elasticsearch.html
managed_by:
description: Meta settings for the mapping.
global: True
helpLink: elasticsearch.html
managed:
description: Meta settings for the mapping.
forcedType: bool
global: True
helpLink: elasticsearch.html
composed_of:
description: The index template is composed of these component templates.
forcedType: "[]string"
global: True
helpLink: elasticsearch.html
priority:
description: The priority of the index template.
forcedType: int
global: True
helpLink: elasticsearch.html
data_stream:
hidden:
description: Hide the data stream.
forcedType: bool
global: True
helpLink: elasticsearch.html
allow_custom_routing:
description: Allow custom routing for the data stream.
forcedType: bool
global: True
helpLink: elasticsearch.html
policy:
phases:
hot:
min_age:
description: Minimum age of index. This determines when the index should be moved to the hot tier.
max_age:
description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier.
global: True
helpLink: elasticsearch.html
actions:
@@ -160,10 +95,187 @@ elasticsearch:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
global: True
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
regex: ^[0-9]{1,5}d$
forcedType: string
global: True
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
helpLink: elasticsearch.html
delete:
min_age:
description: Minimum age of index. ex. 90d - This determines when the index should be deleted.
global: True
helpLink: elasticsearch.html
so-logs: &indexSettings
index_sorting:
description: Sorts the index by event time, at the cost of additional processing resource consumption.
global: True
advanced: True
helpLink: elasticsearch.html
index_template:
index_patterns:
description: Patterns for matching multiple indices or tables.
forcedType: "[]string"
multiline: True
global: True
advanced: True
helpLink: elasticsearch.html
template:
settings:
index:
number_of_replicas:
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
mapping:
total_fields:
limit:
description: Max number of fields that can exist on a single index. Larger values will consume more resources.
global: True
advanced: True
helpLink: elasticsearch.html
refresh_interval:
description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation.
global: True
advanced: True
helpLink: elasticsearch.html
number_of_shards:
description: Number of shards required for this index. Using multiple shards increases fault tolerance, but also increases storage and network costs.
global: True
advanced: True
helpLink: elasticsearch.html
sort:
field:
description: The field to sort by. Must set index_sorting to True.
global: True
advanced: True
helpLink: elasticsearch.html
order:
description: The order to sort by. Must set index_sorting to True.
global: True
advanced: True
helpLink: elasticsearch.html
mappings:
_meta:
package:
name:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed_by:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed:
description: Meta settings for the mapping.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
composed_of:
description: The index template is composed of these component templates.
forcedType: "[]string"
global: True
advanced: True
helpLink: elasticsearch.html
priority:
description: The priority of the index template.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
data_stream:
hidden:
description: Hide the data stream.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
allow_custom_routing:
description: Allow custom routing for the data stream.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
policy:
phases:
hot:
min_age:
description: Minimum age of index. This determines when the index should be moved to the hot tier.
global: True
advanced: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
rollover:
max_age:
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
max_primary_shard_size:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
warm:
min_age:
description: Minimum age of index. This determines when the index should be moved to the warm tier.
global: True
advanced: True
helpLink: elasticsearch.html
actions:
set_priority:
priority:
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
rollover:
max_age:
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
max_primary_shard_size:
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
global: True
advanced: True
helpLink: elasticsearch.html
cold:
min_age:
description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed.
global: True
advanced: True
helpLink: elasticsearch.html
actions:
set_priority:
@@ -171,26 +283,31 @@ elasticsearch:
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
forcedType: int
global: True
advanced: True
helpLink: elasticsearch.html
delete:
min_age:
description: Minimum age of index. This determines when the index should be deleted.
global: True
advanced: True
helpLink: elasticsearch.html
_meta:
package:
name:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed_by:
description: Meta settings for the mapping.
global: True
advanced: True
helpLink: elasticsearch.html
managed:
description: Meta settings for the mapping.
forcedType: bool
global: True
advanced: True
helpLink: elasticsearch.html
so-logs-system_x_auth: *indexSettings
so-logs-system_x_syslog: *indexSettings
@@ -345,3 +462,19 @@ elasticsearch:
so-strelka: *indexSettings
so-syslog: *indexSettings
so-zeek: *indexSettings
so_roles:
so-manager: &soroleSettings
config:
node:
roles:
description: List of Elasticsearch roles that the node should have. Blank assumes all roles
forcedType: "[]string"
global: False
advanced: True
helpLink: elasticsearch.html
so-managersearch: *soroleSettings
so-standalone: *soroleSettings
so-searchnode: *soroleSettings
so-heavynode: *soroleSettings
so-eval: *soroleSettings
so-import: *soroleSettings

View File

@@ -5,7 +5,7 @@
"name": "logs"
},
"codec": "best_compression",
"default_pipeline": "logs-elastic_agent-1.7.0",
"default_pipeline": "logs-elastic_agent-1.13.1",
"mapping": {
"total_fields": {
"limit": "10000"

View File

@@ -6,8 +6,6 @@
. /usr/sbin/so-common
RETURN_CODE=0
ELASTICSEARCH_HOST=$1
ELASTICSEARCH_PORT=9200
@@ -15,40 +13,20 @@ ELASTICSEARCH_PORT=9200
ELASTICSEARCH_INGEST_PIPELINES="/opt/so/conf/elasticsearch/ingest/"
# Wait for ElasticSearch to initialize
if [ ! -f /opt/so/state/espipelines.txt ]; then
echo "State file /opt/so/state/espipelines.txt not found. Running so-elasticsearch-pipelines."
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT"
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
fi
retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
cd ${ELASTICSEARCH_INGEST_PIPELINES}
echo "Loading pipelines..."
for i in .[a-z]* *; do echo $i; RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done
for i in .[a-z]* *;
do
echo $i;
retry 5 5 "so-elasticsearch-query _ingest/pipeline/$i -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load pipeline: $i"
done
echo
cd - >/dev/null
if [[ "$RETURN_CODE" != "1" ]]; then
touch /opt/so/state/espipelines.txt
fi
else
exit $RETURN_CODE
touch /opt/so/state/espipelines.txt
fi

View File

@@ -7,103 +7,157 @@
{% from 'vars/globals.map.jinja' import GLOBALS %}
{%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %}
. /usr/sbin/so-common
{% if GLOBALS.role != 'so-heavynode' %}
. /usr/sbin/so-elastic-fleet-common
{% endif %}
STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt
STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
default_conf_dir=/opt/so/conf
# Define a default directory to load pipelines from
ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/"
{% if GLOBALS.role == 'so-heavynode' %}
file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json"
{% else %}
file="/usr/sbin/so-elastic-fleet-common"
{% endif %}
if [ -f "$file" ]; then
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
COUNT=0
ELASTICSEARCH_CONNECTED="no"
while [[ "$COUNT" -le 240 ]]; do
so-elasticsearch-query / -k --output /dev/null --silent --head --fail
if [ $? -eq 0 ]; then
ELASTICSEARCH_CONNECTED="yes"
echo "connected!"
break
else
((COUNT+=1))
sleep 1
echo -n "."
fi
done
if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then
echo
echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
echo
exit 1
fi
{% if GLOBALS.role != 'so-heavynode' %}
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} )
if [ "$INSTALLED" != "installed" ]; then
echo
echo "Packages not yet installed."
echo
exit 0
fi
{% endif %}
set -e
cd ${ELASTICSEARCH_TEMPLATES}/component/ecs
echo "Loading ECS component templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE-mappings"; so-elasticsearch-query _component_template/$TEMPLATE-mappings -d@$i -XPUT 2>/dev/null; echo; done
cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent
echo "Loading Elastic Agent component templates..."
{% if GLOBALS.role == 'so-heavynode' %}
component_pattern="so-*"
{% else %}
component_pattern="*"
{% endif %}
for i in $component_pattern; do TEMPLATE=${i::-5}; echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
# Load SO-specific component templates
cd ${ELASTICSEARCH_TEMPLATES}/component/so
echo "Loading Security Onion component templates..."
for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done
echo
# Load SO index templates
cd ${ELASTICSEARCH_TEMPLATES}/index
echo "Loading Security Onion index templates..."
shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
{% else %}
pattern="*"
{% endif %}
for i in $pattern; do
TEMPLATE=${i::-14};
echo "$TEMPLATE";
so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT 2>/dev/null;
echo;
done
echo
if [[ -f $STATE_FILE_INITIAL ]]; then
# The initial template load has already run. As this is a subsequent load, all dependencies should
# already be satisified. Therefore, immediately exit/abort this script upon any template load failure
# since this is an unrecoverable failure.
should_exit_on_failure=1
else
{% if GLOBALS.role == 'so-heavynode' %}
echo "Common template does not exist. Exiting..."
{% else %}
echo "Elastic Fleet not configured. Exiting..."
{% endif %}
exit 0
# This is the initial template load, and there likely are some components not yet setup in Elasticsearch.
# Therefore load as many templates as possible at this time and if an error occurs proceed to the next
# template. But if at least one template fails to load do not mark the templates as having been loaded.
# This will allow the next load to resume the load of the templates that failed to load initially.
should_exit_on_failure=0
echo "This is the initial template load"
fi
load_failures=0
# Load a single Elasticsearch template, retrying on transient failure.
# Arguments:
#   $1 - API path for the template (e.g. _index_template/<name>)
#   $2 - JSON file to PUT as the template body
# Globals:
#   should_exit_on_failure (read)  - when 1, any load failure is fatal
#   load_failures          (written) - incremented on non-fatal failure
load_template() {
  local uri=$1
  local file=$2
  # Bugfix: report this function's own parameter, not the caller's loop
  # variable $i (the original echoed "$i", which only worked by coincidence).
  echo "Loading template file $file"
  if ! retry 3 5 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"; then
    if [[ $should_exit_on_failure -eq 1 ]]; then
      fail "Could not load template file: $file"
    else
      # Initial load: dependencies may not exist yet; count and continue so
      # the remaining templates still get a chance to load.
      load_failures=$((load_failures+1))
      echo "Incremented load failure counter: $load_failures"
    fi
  fi
}
if [ ! -f $STATE_FILE_SUCCESS ]; then
echo "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load."
. /usr/sbin/so-common
{% if GLOBALS.role != 'so-heavynode' %}
if [ -f /usr/sbin/so-elastic-fleet-common ]; then
. /usr/sbin/so-elastic-fleet-common
fi
{% endif %}
default_conf_dir=/opt/so/conf
# Define a default directory to load pipelines from
ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/"
{% if GLOBALS.role == 'so-heavynode' %}
file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json"
{% else %}
file="/usr/sbin/so-elastic-fleet-common"
{% endif %}
if [ -f "$file" ]; then
# Wait for ElasticSearch to initialize
echo -n "Waiting for ElasticSearch..."
retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
{% if GLOBALS.role != 'so-heavynode' %}
SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}')
INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} )
if [ "$INSTALLED" != "installed" ]; then
echo
echo "Packages not yet installed."
echo
exit 0
fi
{% endif %}
touch $STATE_FILE_INITIAL
cd ${ELASTICSEARCH_TEMPLATES}/component/ecs
echo "Loading ECS component templates..."
for i in *; do
TEMPLATE=$(echo $i | cut -d '.' -f1)
load_template "_component_template/${TEMPLATE}-mappings" "$i"
done
echo
cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent
echo "Loading Elastic Agent component templates..."
{% if GLOBALS.role == 'so-heavynode' %}
component_pattern="so-*"
{% else %}
component_pattern="*"
{% endif %}
for i in $component_pattern; do
TEMPLATE=${i::-5}
load_template "_component_template/$TEMPLATE" "$i"
done
echo
# Load SO-specific component templates
cd ${ELASTICSEARCH_TEMPLATES}/component/so
echo "Loading Security Onion component templates..."
for i in *; do
TEMPLATE=$(echo $i | cut -d '.' -f1);
load_template "_component_template/$TEMPLATE" "$i"
done
echo
# Load SO index templates
cd ${ELASTICSEARCH_TEMPLATES}/index
echo "Loading Security Onion index templates..."
shopt -s extglob
{% if GLOBALS.role == 'so-heavynode' %}
pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)"
{% else %}
pattern="*"
{% endif %}
# Index templates will be skipped if the following conditions are met:
# 1. The template is part of the "so-logs-" template group
# 2. The template name does not correlate to at least one existing component template
# In this situation, the script will treat the skipped template as a temporary failure
# and allow the templates to be loaded again on the next run or highstate, whichever
# comes first.
COMPONENT_LIST=$(so-elasticsearch-component-templates-list)
for i in $pattern; do
TEMPLATE=${i::-14}
COMPONENT_PATTERN=${TEMPLATE:3}
MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -v osquery)
if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" ]]; then
load_failures=$((load_failures+1))
echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
else
load_template "_index_template/$TEMPLATE" "$i"
fi
done
else
{% if GLOBALS.role == 'so-heavynode' %}
echo "Common template does not exist. Exiting..."
{% else %}
echo "Elastic Fleet not configured. Exiting..."
{% endif %}
exit 0
fi
cd - >/dev/null
if [[ $load_failures -eq 0 ]]; then
echo "All template loaded successfully"
touch $STATE_FILE_SUCCESS
else
echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate"
fi
else
echo "Templates already loaded"
fi
cd - >/dev/null

View File

@@ -89,7 +89,6 @@ COMMIT
-A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
-A INPUT -i lo -j ACCEPT
-A INPUT -m conntrack --ctstate INVALID -j DROP
-A INPUT -j REJECT --reject-with icmp-host-prohibited
-A INPUT -p icmp -j ACCEPT
-A INPUT -j LOGGING
-A FORWARD -j DOCKER-USER
@@ -103,6 +102,7 @@ COMMIT
-A FORWARD -m conntrack --ctstate INVALID -j DROP
-A FORWARD -j REJECT --reject-with icmp-host-prohibited
-A OUTPUT -o lo -j ACCEPT
# block icmp timestamp reply
-A OUTPUT -p icmp -m icmp --icmp-type 14 -j DROP
{%- for rule in D2 %}

View File

@@ -1,454 +0,0 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
"""
Local exit codes:
- General error: 1
- Invalid argument: 2
- File error: 3
"""
import sys, os, subprocess, argparse, signal
import copy
import re
import textwrap
import yaml
# Directory holding per-minion pillar files; the single manager-type pillar
# found here is the file this script reads and rewrites.
minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions'
# NOTE(review): despite the annotation, this global is only ever assigned
# integer exit codes returned by check_apply(), never a CompletedProcess —
# see sigint_handler, which calls send_signal on it. Confirm intent.
salt_proc: subprocess.CompletedProcess = None
def print_err(string: str):
    """Write a message to stderr, followed by a newline."""
    sys.stderr.write(f'{string}\n')
def check_apply(args: dict, prompt: bool = True):
    """Apply the updated configuration, prompting the user if needed.

    If --apply was given, apply immediately. Otherwise, when prompt is
    True, ask the user (default: No); when prompt is False, do nothing.
    Returns the exit code of apply(), or 0 when nothing was applied.
    """
    if args.apply:
        print('Configuration updated. Applying changes:')
        return apply()
    if not prompt:
        return 0
    message = 'Configuration updated. Would you like to apply your changes now? (y/N) '
    answer = input(message)
    # Re-prompt until a recognized answer is given (empty means "no").
    while answer.lower() not in ('y', 'n', ''):
        answer = input(message)
    if answer.lower() == 'y':
        print('Applying changes:')
        return apply()
    return 0
def apply():
    """Sync idstools config files via salt-call, then refresh rules.

    Returns the exit code of so-rule-update, or of the salt-call state
    apply if that step fails first.
    """
    print('Syncing config files...')
    sync = subprocess.run(
        ['salt-call', 'state.apply', '-l', 'quiet', 'idstools.sync_files', 'queue=True'],
        stdout=subprocess.DEVNULL,
    )
    if sync.returncode != 0:
        return sync.returncode
    print('Updating rules...')
    return subprocess.run(['so-rule-update']).returncode
def find_minion_pillar() -> str:
    """Locate the single manager-type minion pillar file.

    Walks minion_pillar_dir looking for files named
    ``*_<role>.sls`` where role is manager, managersearch, standalone,
    import, or eval.

    Returns:
        The full path to the one matching pillar file.

    Exits:
        3 if no match is found (not running on a manager) or if more
        than one match exists (inconsistent system state).
    """
    # Raw string: '\.' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, SyntaxWarning in 3.12+).
    regex = r'^.*_(manager|managersearch|standalone|import|eval)\.sls$'
    result = []
    for root, _, files in os.walk(minion_pillar_dir):
        for f_minion_id in files:
            if re.search(regex, f_minion_id):
                result.append(os.path.join(root, f_minion_id))
    if len(result) == 0:
        print_err('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). Are you running this script on the manager?')
        sys.exit(3)
    elif len(result) > 1:
        res_str = ', '.join(f'"{r}"' for r in result)
        print_err('(This should not happen, the system is in an error state if you see this message.)\n')
        print_err('More than one manager-type pillar exists, minion id\'s listed below:')
        print_err(f'  {res_str}')
        sys.exit(3)
    else:
        return result[0]
def read_pillar(pillar: str):
    """Parse a pillar YAML file and return its contents as a dict.

    Exits 3 if the file cannot be opened/parsed or is empty.

    Bug fix: the original bare ``except:`` also caught the SystemExit
    raised by the inner ``sys.exit(3)`` (empty/unparseable file), which
    then printed a misleading "Could not open" message. Catching only
    I/O and YAML errors keeps the two failure modes distinct.
    """
    try:
        with open(pillar, 'r') as f:
            loaded_yaml = yaml.safe_load(f.read())
    except (OSError, yaml.YAMLError):
        print_err(f'Could not open {pillar}')
        sys.exit(3)
    if loaded_yaml is None:
        print_err(f'Could not parse {pillar}')
        sys.exit(3)
    return loaded_yaml
def write_pillar(pillar: str, content: dict):
    """Normalize empty sid lists to None and write content back as YAML.

    Empty 'disabled'/'enabled'/'modify' lists are collapsed to None so
    the rendered pillar shows an explicit null instead of ``[]``.
    Exits 3 on failure.

    Fixes: the original reported every failure (including a KeyError
    from a missing 'idstools'/'sids' key) as "Could not open" and left
    the caught exception unused; the message now includes the cause.
    """
    try:
        sids = content['idstools']['sids']
        # Collapse empty lists to None for a cleaner rendered pillar.
        for key in ('disabled', 'enabled', 'modify'):
            if sids.get(key) is not None and len(sids[key]) == 0:
                sids[key] = None
        with open(pillar, 'w') as f:
            return yaml.dump(content, f, default_flow_style=False)
    except Exception as e:
        print_err(f'Could not write {pillar}: {e}')
        sys.exit(3)
def check_sid_pattern(sid_pattern: str):
    """Validate a SID pattern argument.

    Accepts either a non-negative integer SID or a regular expression
    prefixed with "re:". Prints an error to stderr and returns False
    for anything else.
    """
    message = f'SID {sid_pattern} is not valid, did you forget the "re:" prefix for a regex pattern?'
    if sid_pattern.startswith('re:'):
        # Regex form: everything after the prefix must compile.
        if valid_regex(sid_pattern[3:]):
            return True
        print_err('Invalid regex pattern.')
        return False
    # Plain form: must parse as a non-negative integer.
    try:
        sid = int(sid_pattern)
    except (TypeError, ValueError):
        print_err(message)
        return False
    if sid < 0:
        print_err(message)
        return False
    return True
def valid_regex(pattern: str):
    """Return True if pattern compiles as a regular expression."""
    try:
        re.compile(pattern)
    except re.error:
        return False
    return True
def sids_key_exists(pillar: dict, key: str):
    """Return True if idstools:sids contains `key` in the pillar dict.

    Robust to a missing 'idstools' or 'sids' level and to either level
    being parsed as None (an empty YAML mapping renders as null), which
    previously raised ``TypeError: argument of type 'NoneType' is not
    iterable`` on the ``in`` test.
    """
    sids = (pillar.get('idstools') or {}).get('sids') or {}
    return key in sids
def rem_from_sids(pillar: dict, key: str, val: str, optional = False):
    """Return a deep copy of pillar with val removed from idstools:sids:<key>.

    The input dict is never mutated. If val is absent (or the list is
    None) the copy is returned unchanged; unless optional is True, a
    notice is printed in that case.
    """
    updated = copy.deepcopy(pillar)
    entries = updated['idstools']['sids'][key]
    if entries is not None and val in entries:
        entries.remove(val)
    elif not optional:
        print(f'{val} already does not exist in {key}')
    return updated
def add_to_sids(pillar: dict, key: str, val: str, optional = False):
    """Return a deep copy of pillar with val appended to idstools:sids:<key>.

    The input dict is never mutated. A None list is promoted to an
    empty list before appending. Duplicate values are not appended;
    unless optional is True, a notice is printed in that case.
    """
    updated = copy.deepcopy(pillar)
    sids = updated['idstools']['sids']
    if sids[key] is None:
        sids[key] = []
    if val not in sids[key]:
        sids[key].append(val)
    elif not optional:
        print(f'{val} already exists in {key}')
    return updated
def add_rem_disabled(args: dict):
    """Add or remove args.sid_pattern in the pillar's 'disabled' list.

    On add, the pattern is also removed from 'enabled', and the user is
    offered removal of any 'modify' entries referencing the pattern.
    Returns 2 for an invalid pattern, otherwise the check_apply() code.
    """
    global salt_proc
    if not check_sid_pattern(args.sid_pattern):
        return 2
    pillar_dict = read_pillar(args.pillar)
    # Ensure the 'disabled' key exists so the add/remove helpers can index it.
    if not sids_key_exists(pillar_dict, 'disabled'):
        pillar_dict['idstools']['sids']['disabled'] = None
    if args.remove:
        temp_pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern)
    else:
        temp_pillar_dict = add_to_sids(pillar_dict, 'disabled', args.sid_pattern)
    # No change (value already present/absent): skip the write, only apply
    # when --apply was passed (prompt=False suppresses the interactive ask).
    if temp_pillar_dict['idstools']['sids']['disabled'] == pillar_dict['idstools']['sids']['disabled']:
        salt_proc = check_apply(args, prompt=False)
        return salt_proc
    else:
        pillar_dict = temp_pillar_dict
    if not args.remove:
        # Disabling implies un-enabling: silently drop the pattern from 'enabled'.
        if sids_key_exists(pillar_dict, 'enabled'):
            pillar_dict = rem_from_sids(pillar_dict, 'enabled', args.sid_pattern, optional=True)
        # Offer to drop any modify actions that target the same pattern,
        # since modifying a disabled rule has no effect.
        modify = pillar_dict.get('idstools', {}).get('sids', {}).get('modify')
        if modify is not None:
            rem_candidates = []
            for action in modify:
                # Modify entries are "<pattern> <search> <replace>"; match on the
                # leading pattern token.
                if action.startswith(f'{args.sid_pattern} '):
                    rem_candidates.append(action)
            if len(rem_candidates) > 0:
                for item in rem_candidates:
                    print(f'  - {item}')
                answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ')
                # Re-print the candidates on every re-prompt so the user keeps context.
                while answer.lower() not in [ 'y', 'n', '' ]:
                    for item in rem_candidates:
                        print(f'  - {item}')
                    answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ')
                if answer.lower() in [ 'y', '' ]:
                    for item in rem_candidates:
                        modify.remove(item)
                    pillar_dict['idstools']['sids']['modify'] = modify
    write_pillar(pillar=args.pillar, content=pillar_dict)
    salt_proc = check_apply(args)
    return salt_proc
def list_disabled_rules(args: dict):
    """Print the pillar's disabled rule patterns; always returns 0."""
    sids = read_pillar(args.pillar).get('idstools', {}).get('sids', {})
    disabled = sids.get('disabled')
    if disabled is None:
        print('No rules disabled.')
    else:
        print('Disabled rules:')
        for rule in disabled:
            print(f'  - {rule}')
    return 0
def add_rem_enabled(args: dict):
    """Add or remove args.sid_pattern in the pillar's 'enabled' list.

    On add, the pattern is also silently dropped from 'disabled'.
    Returns 2 for an invalid pattern, otherwise the check_apply() code.
    """
    global salt_proc
    if not check_sid_pattern(args.sid_pattern):
        return 2
    pillar_dict = read_pillar(args.pillar)
    # Make sure the key exists before the helpers index into it.
    if not sids_key_exists(pillar_dict, 'enabled'):
        pillar_dict['idstools']['sids']['enabled'] = None
    op = rem_from_sids if args.remove else add_to_sids
    updated = op(pillar_dict, 'enabled', args.sid_pattern)
    if updated['idstools']['sids']['enabled'] == pillar_dict['idstools']['sids']['enabled']:
        # Nothing changed: skip the write; apply only if --apply was given.
        salt_proc = check_apply(args, prompt=False)
        return salt_proc
    pillar_dict = updated
    # Enabling implies un-disabling.
    if not args.remove and sids_key_exists(pillar_dict, 'disabled'):
        pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True)
    write_pillar(pillar=args.pillar, content=pillar_dict)
    salt_proc = check_apply(args)
    return salt_proc
def list_enabled_rules(args: dict):
    """Print the pillar's explicitly enabled rule patterns; always returns 0."""
    sids = read_pillar(args.pillar).get('idstools', {}).get('sids', {})
    enabled = sids.get('enabled')
    if enabled is None:
        print('No rules explicitly enabled.')
    else:
        print('Enabled rules:')
        for rule in enabled:
            print(f'  - {rule}')
    return 0
def add_rem_modify(args: dict):
    """Add or remove a '<sid_pattern> "<search>" "<replace>"' modify action.

    Returns 2 for an invalid SID pattern or search regex, otherwise the
    exit code from check_apply().

    Bug fix: an invalid search_term regex previously only printed an
    error and then fell through, writing the invalid action into the
    pillar anyway; it now returns 2 like the other validation failures.
    """
    global salt_proc
    if not check_sid_pattern(args.sid_pattern):
        return 2
    if not valid_regex(args.search_term):
        print_err('Search term is not a valid regex pattern.')
        return 2
    string_val = f'{args.sid_pattern} "{args.search_term}" "{args.replace_term}"'
    pillar_dict = read_pillar(args.pillar)
    if not sids_key_exists(pillar_dict, 'modify'):
        pillar_dict['idstools']['sids']['modify'] = None
    if args.remove:
        temp_pillar_dict = rem_from_sids(pillar_dict, 'modify', string_val)
    else:
        temp_pillar_dict = add_to_sids(pillar_dict, 'modify', string_val)
    # No change: skip the write; apply only when --apply was given.
    if temp_pillar_dict['idstools']['sids']['modify'] == pillar_dict['idstools']['sids']['modify']:
        salt_proc = check_apply(args, prompt=False)
        return salt_proc
    else:
        pillar_dict = temp_pillar_dict
    # TODO: Determine if a rule should be removed from disabled if modified.
    if not args.remove:
        # Modifying implies the rule should be active: drop it from 'disabled'.
        if sids_key_exists(pillar_dict, 'disabled'):
            pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True)
    write_pillar(pillar=args.pillar, content=pillar_dict)
    salt_proc = check_apply(args)
    return salt_proc
def list_modified_rules(args: dict):
    """Print the pillar's modify actions; always returns 0."""
    sids = read_pillar(args.pillar).get('idstools', {}).get('sids', {})
    modify = sids.get('modify')
    if modify is None:
        print('No rules currently modified.')
    else:
        print('Modified rules + modifications:')
        for rule in modify:
            print(f'  - {rule}')
    return 0
def sigint_handler(*_):
    """Handle Ctrl-C: forward SIGINT to a live child process, then exit 0.

    Bug fix: salt_proc only ever holds None or an integer exit code
    returned by check_apply() (never a live process object), so the
    unconditional send_signal() call raised AttributeError whenever a
    user hit Ctrl-C after an apply. Guard on the attribute so the
    handler degrades to a clean exit.
    """
    print('Exiting gracefully on Ctrl-C')
    if salt_proc is not None and hasattr(salt_proc, 'send_signal'):
        salt_proc.send_signal(signal.SIGINT)
    sys.exit(0)
def main():
    """Entry point: build the disabled/enabled/modify CLI and dispatch.

    Requires root (pillar files are root-owned). Each subcommand's
    add/remove/list action is bound to its handler via set_defaults.
    """
    signal.signal(signal.SIGINT, sigint_handler)
    if os.geteuid() != 0:
        print_err('You must run this script as root')
        sys.exit(1)
    apply_help='After updating rule configuration, apply the idstools state.'
    # RawDescriptionHelpFormatter keeps the hand-formatted command table below intact.
    main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter)
    subcommand_desc = textwrap.dedent(
        """\
        disabled Manage and list disabled rules (add, remove, list)
        enabled Manage and list enabled rules (add, remove, list)
        modify Manage and list modified rules (add, remove, list)
        """
    )
    subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command')
    sid_or_regex_help = 'A valid SID (ex: "4321") or regular expression pattern (ex: "re:heartbleed|spectre")'
    # Disabled actions: add/remove share the handler, 'remove' sets remove=True.
    disabled = subparsers.add_parser('disabled')
    disabled_sub = disabled.add_subparsers()
    disabled_add = disabled_sub.add_parser('add')
    disabled_add.set_defaults(func=add_rem_disabled)
    disabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    disabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    disabled_rem = disabled_sub.add_parser('remove')
    disabled_rem.set_defaults(func=add_rem_disabled, remove=True)
    disabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    disabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    disabled_list = disabled_sub.add_parser('list')
    disabled_list.set_defaults(func=list_disabled_rules)
    # Enabled actions
    enabled = subparsers.add_parser('enabled')
    enabled_sub = enabled.add_subparsers()
    enabled_add = enabled_sub.add_parser('add')
    enabled_add.set_defaults(func=add_rem_enabled)
    enabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    enabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    enabled_rem = enabled_sub.add_parser('remove')
    enabled_rem.set_defaults(func=add_rem_enabled, remove=True)
    enabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    enabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    enabled_list = enabled_sub.add_parser('list')
    enabled_list.set_defaults(func=list_enabled_rules)
    search_term_help='A properly escaped regex search term (ex: "\\\$EXTERNAL_NET")'
    replace_term_help='The text to replace the search term with'
    # Modify actions: take a pattern plus search/replace terms.
    modify = subparsers.add_parser('modify')
    modify_sub = modify.add_subparsers()
    modify_add = modify_sub.add_parser('add')
    modify_add.set_defaults(func=add_rem_modify)
    modify_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help)
    modify_add.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help)
    modify_add.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help)
    modify_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    modify_rem = modify_sub.add_parser('remove')
    modify_rem.set_defaults(func=add_rem_modify, remove=True)
    # NOTE(review): metavar 'SID' is inconsistent with the other subcommands'
    # 'SID|REGEX' even though the help text allows both — confirm intent.
    modify_rem.add_argument('sid_pattern', metavar='SID', help=sid_or_regex_help)
    modify_rem.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help)
    modify_rem.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help)
    modify_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help)
    modify_list = modify_sub.add_parser('list')
    modify_list.set_defaults(func=list_modified_rules)
    # Begin parse + run
    args = main_parser.parse_args(sys.argv[1:])
    # Only the 'remove' subparsers set remove; default it for add/list paths.
    if not hasattr(args, 'remove'):
        args.remove = False
    args.pillar = find_minion_pillar()
    if hasattr(args, 'func'):
        exit_code = args.func(args)
    else:
        # No action given: show help for the chosen command (or the top level).
        if args.command is None:
            main_parser.print_help()
        else:
            if args.command == 'disabled':
                disabled.print_help()
            elif args.command == 'enabled':
                enabled.print_help()
            elif args.command == 'modify':
                modify.print_help()
        sys.exit(0)
    sys.exit(exit_code)

View File

@@ -22,7 +22,7 @@ so-influxdb:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
- environment:
- INFLUXD_CONFIG_PATH=/conf
- INFLUXD_CONFIG_PATH=/conf/config.yaml
- INFLUXDB_HTTP_LOG_ENABLED=false
- DOCKER_INFLUXDB_INIT_MODE=setup
- DOCKER_INFLUXDB_INIT_USERNAME=so

View File

@@ -21,8 +21,10 @@ kibana:
appenders:
- default
- file
migrations:
discardCorruptObjects: "8.10.4"
telemetry:
enabled: False
enabled: False
security:
showInsecureClusterWarning: False
xpack:

View File

@@ -1 +1 @@
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.8.2","id": "8.8.2","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}
{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="}

View File

@@ -0,0 +1 @@
user = "{{ salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:user', 'NO_USER_SET') }}:{{ salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:pass', 'NO_PW_SET') }}"

View File

@@ -0,0 +1,20 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Block (up to 300s) until the Kibana Fleet API answers, so dependent
# scripts don't race Kibana startup. Exits 1 if it never comes up.

. /usr/sbin/so-common

echo "Checking to make sure that Kibana API is up & ready..."
if ! wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"; then
  echo "Kibana API not accessible, exiting script..."
  exit 1
fi

View File

@@ -63,7 +63,7 @@ update() {
IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))'
for i in "${LINES[@]}"; do
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.8.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ")
echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi
done

View File

@@ -51,6 +51,14 @@ kratosschema:
- group: 928
- mode: 600
kratosoidc:
file.managed:
- name: /opt/so/conf/kratos/oidc.jsonnet
- source: salt://kratos/files/oidc.jsonnet
- user: 928
- group: 928
- mode: 600
kratosconfig:
file.managed:
- name: /opt/so/conf/kratos/kratos.yaml

View File

@@ -1,5 +1,18 @@
kratos:
enabled: False
oidc:
enabled: false
config:
id: SSO
mapper_url: file:///kratos-conf/oidc.jsonnet
subject_source: userinfo
scope:
- email
- profile
requested_claims:
id_token:
email:
essential: true
config:
session:
lifespan: 24h

View File

@@ -21,8 +21,7 @@ so-kratos:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-kratos'].ip }}
- binds:
- /opt/so/conf/kratos/schema.json:/kratos-conf/schema.json:ro
- /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro
- /opt/so/conf/kratos/:/kratos-conf:ro
- /opt/so/log/kratos/:/kratos-log:rw
- /nsm/kratos/db:/kratos-data:rw
{% if DOCKER.containers['so-kratos'].custom_bind_mounts %}

View File

@@ -0,0 +1,8 @@
// Kratos OIDC claims mapper: projects the provider's OIDC claims onto
// the Kratos identity schema traits.
local claims = std.extVar('claims');
{
  identity: {
    traits: {
      // Prefer the standard email claim; fall back to preferred_username
      // for providers that omit email from the id_token/userinfo.
      email: if 'email' in claims then claims.email else claims.preferred_username
    },
  },
}

View File

@@ -20,3 +20,19 @@
{% do KRATOSDEFAULTS.kratos.config.courier.smtp.update({'connection_uri': KRATOSDEFAULTS.kratos.config.courier.smtp.connection_uri | replace("URL_BASE", GLOBALS.url_base)}) %}
{% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %}
{% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('features') %}
{% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %}
{% endif %}
{% if KRATOSMERGED.oidc.config.auth_url is defined and not KRATOSMERGED.oidc.config.auth_url.strip() | length %}
{% do KRATOSMERGED.oidc.config.pop('auth_url') %}
{% endif %}
{% if KRATOSMERGED.oidc.config.issuer_url is defined and not KRATOSMERGED.oidc.config.issuer_url.strip() | length %}
{% do KRATOSMERGED.oidc.config.pop('issuer_url') %}
{% endif %}
{% if KRATOSMERGED.oidc.config.token_url is defined and not KRATOSMERGED.oidc.config.token_url.strip() | length %}
{% do KRATOSMERGED.oidc.config.pop('token_url') %}
{% endif %}

View File

@@ -3,6 +3,90 @@ kratos:
description: You can enable or disable Kratos.
advanced: True
helpLink: kratos.html
oidc:
enabled:
description: Set to True to enable OIDC / Single Sign-On (SSO) to SOC. Requires a valid Security Onion license key.
global: True
helpLink: oidc.html
config:
id:
description: Customize the OIDC provider name. This name appears on the login page. Required.
global: True
forcedType: string
helpLink: oidc.html
provider:
description: "Specify the provider type. Required. Valid values are: auth0, generic, github, google, microsoft"
global: True
forcedType: string
regex: "auth0|generic|github|google|microsoft"
regexFailureMessage: "Valid values are: auth0, generic, github, google, microsoft"
helpLink: oidc.html
client_id:
description: Specify the client ID, also referenced as the application ID. Required.
global: True
forcedType: string
helpLink: oidc.html
client_secret:
description: Specify the client secret. Required.
global: True
forcedType: string
helpLink: oidc.html
microsoft_tenant:
description: Specify the Microsoft Active Directory Tenant ID. Required when provider is 'microsoft'.
global: True
forcedType: string
helpLink: oidc.html
subject_source:
description: The source of the subject identifier. Typically 'userinfo'. Only used when provider is 'microsoft'.
global: True
forcedType: string
regex: me|userinfo
regexFailureMessage: "Valid values are: me, userinfo"
helpLink: oidc.html
auth_url:
description: Provider's auth URL. Required when provider is 'generic'.
global: True
forcedType: string
helpLink: oidc.html
issuer_url:
description: Provider's issuer URL. Required when provider is 'auth0' or 'generic'.
global: True
forcedType: string
helpLink: oidc.html
mapper_url:
description: A file path or URL in Jsonnet format, used to map OIDC claims to the Kratos schema. Defaults to an included file that maps the email claim. Note that the contents of the included file can be customized via the "OIDC Claims Mapping" setting.
advanced: True
global: True
forcedType: string
helpLink: oidc.html
token_url:
description: Provider's token URL. Required when provider is 'generic'.
global: True
forcedType: string
helpLink: oidc.html
scope:
description: List of scoped data categories to request in the authentication response. Typically 'email' and 'profile' are the minimum required scopes. However, GitHub requires `user:email', instead and Auth0 requires 'profile', 'email', and 'openid'.
global: True
forcedType: "[]string"
helpLink: oidc.html
requested_claims:
id_token:
email:
essential:
description: Specifies whether the email claim is necessary. Typically leave this value set to true.
advanced: True
global: True
helpLink: oidc.html
files:
oidc__jsonnet:
title: OIDC Claims Mapping
description: Customize the OIDC claim mappings to the Kratos schema. The default mappings include the minimum required for login functionality, so this typically does not need to be customized. Visit https://jsonnet.org for more information about this file format.
advanced: True
file: True
global: True
helpLink: oidc.html
config:
session:
lifespan:
@@ -19,10 +103,10 @@ kratos:
methods:
password:
enabled:
description: Set to True to enable traditional password authentication. Leave as default to ensure proper security protections remain in place.
description: Set to True to enable traditional password authentication to SOC. Typically set to true, except when exclusively using OIDC authentication. Some external tool interfaces may not be accessible if local password authentication is disabled.
global: True
advanced: True
helpLink: kratos.html
helpLink: oidc.html
config:
haveibeenpwned_enabled:
description: Set to True to check if a newly chosen password has ever been found in a published list of previously-compromised passwords. Requires outbound Internet connectivity when enabled.
@@ -30,7 +114,7 @@ kratos:
helpLink: kratos.html
totp:
enabled:
description: Set to True to enable Time-based One-Time Password (TOTP) multi-factor authentication (MFA). Enable to ensure proper security protections remain in place. Be aware that disabling this setting, after users have already setup TOTP, may prevent users from logging in.
description: Set to True to enable Time-based One-Time Password (TOTP) multi-factor authentication (MFA) to SOC. Enable to ensure proper security protections remain in place. Be aware that disabling this setting, after users have already setup TOTP, may prevent users from logging in.
global: True
helpLink: kratos.html
config:
@@ -41,7 +125,7 @@ kratos:
helpLink: kratos.html
webauthn:
enabled:
description: Set to True to enable Security Keys (WebAuthn / PassKeys) for passwordless or multi-factor authentication (MFA) logins. Security Keys are a Public-Key Infrastructure (PKI) based authentication method, typically involving biometric hardware devices, such as laptop fingerprint scanners and USB hardware keys. Be aware that disabling this setting, after users have already setup their accounts with Security Keys, may prevent users from logging in.
description: Set to True to enable Security Keys (WebAuthn / PassKeys) for passwordless or multi-factor authentication (MFA) SOC logins. Security Keys are a Public-Key Infrastructure (PKI) based authentication method, typically involving biometric hardware devices, such as laptop fingerprint scanners and USB hardware keys. Be aware that disabling this setting, after users have already setup their accounts with Security Keys, may prevent users from logging in.
global: True
helpLink: kratos.html
config:
@@ -65,6 +149,7 @@ kratos:
global: True
advanced: True
helpLink: kratos.html
flows:
settings:
privileged_session_max_age:

View File

@@ -11,7 +11,10 @@ input {
}
}
filter {
if ![metadata] {
mutate {
rename => {"@metadata" => "metadata"}
}
}
}

View File

@@ -13,10 +13,11 @@ input {
filter {
if "fleet-lumberjack-input" in [tags] {
if ![metadata] {
mutate {
rename => {"@metadata" => "metadata"}
}
}
}

View File

@@ -1,13 +1,16 @@
output {
if "elastic-agent" in [tags] {
if [metadata][pipeline] {
if [metadata][pipeline] {
if [metadata][_id] {
elasticsearch {
hosts => "{{ GLOBALS.manager }}"
ecs_compatibility => v8
data_stream => true
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
document_id => "%{[metadata][_id]}"
pipeline => "%{[metadata][pipeline]}"
silence_errors_in_log => ["version_conflict_engine_exception"]
ssl => true
ssl_certificate_verification => false
}
@@ -19,10 +22,22 @@ output {
data_stream => true
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
pipeline => "%{[metadata][pipeline]}"
ssl => true
ssl_certificate_verification => false
ssl_certificate_verification => false
}
}
}
}
else {
elasticsearch {
hosts => "{{ GLOBALS.manager }}"
ecs_compatibility => v8
data_stream => true
user => "{{ ES_USER }}"
password => "{{ ES_PASS }}"
ssl => true
ssl_certificate_verification => false
}
}
}
}

View File

@@ -16,6 +16,7 @@ include:
- kibana.secrets
- manager.sync_es_users
- manager.elasticsearch
- manager.kibana
repo_log_dir:
file.directory:
@@ -60,6 +61,8 @@ manager_sbin:
- user: 939
- group: 939
- file_mode: 755
- exclude_pat:
- "*_test.py"
yara_update_scripts:
file.recurse:

8
salt/manager/kibana.sls Normal file
View File

@@ -0,0 +1,8 @@
# Render the Kibana curl credentials file on the manager so local
# scripts can authenticate against the Kibana API.
kibana_curl_config_distributed:
  file.managed:
    - name: /opt/so/conf/kibana/curl.config
    - source: salt://kibana/files/curl.config.template
    - template: jinja
    # Contains credentials: restrict to owner read/write.
    - mode: 600
    # Don't echo credential contents into state-run output.
    - show_changes: False
    - makedirs: True

View File

@@ -1,15 +0,0 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
. /usr/sbin/so-common
echo ""
echo "Hosts/Networks that have access to login to the Security Onion Console:"
so-firewall includedhosts analyst

View File

@@ -1,19 +1,9 @@
#!/usr/bin/env python3
# Copyright 2014-2023 Security Onion Solutions, LLC
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import os
import subprocess

View File

@@ -406,12 +406,17 @@ function update_logstash_outputs() {
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
}
function checkMine() {
local func=$1
# make sure the minion sees itself in the mine since it needs to see itself for states as opposed to using salt-run
retry 20 1 "salt '$MINION_ID' mine.get '\*' '$func'" "$MINION_ID"
}
function updateMine() {
salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC"
}
function apply_ES_state() {
salt-call state.apply elasticsearch concurrent=True
retry 20 1 "salt '$MINION_ID' mine.update" True
}
function createEVAL() {
is_pcaplimit=true
add_elasticsearch_to_minion
@@ -547,8 +552,6 @@ function createSEARCHNODE() {
add_elasticsearch_to_minion
add_logstash_to_minion
add_telegraf_to_minion
updateMine
apply_ES_state
}
function createRECEIVER() {
@@ -563,6 +566,19 @@ function createDESKTOP() {
}
function testConnection() {
# the minion should be trying to auth every 10 seconds so 15 seconds should be more than enough time to see this in the log
# this retry was put in because it is possible that a minion is attempted to be pinged before it has authenticated and connected to the Salt master
# causing the first ping to fail and typically wouldn't be successful until the second ping
# this check may pass without the minion being authenticated if it was previously connected and the line exists in the log
retry 15 1 "grep 'Authentication accepted from $MINION_ID' /opt/so/log/salt/master"
local retauth=$?
if [[ $retauth != 0 ]]; then
echo "The Minion did not authenticate with the Salt master in the allotted time"
echo "Deleting the key"
deleteminion
exit 1
fi
retry 15 3 "salt '$MINION_ID' test.ping" True
local ret=$?
if [[ $ret != 0 ]]; then
@@ -582,9 +598,9 @@ if [[ "$OPERATION" = 'delete' ]]; then
deleteminion
fi
if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then
# Skip this if its setup
if [ $OPERATION != 'setup' ]; then
if [[ $OPERATION == 'add' ]]; then
# Accept the salt key
acceptminion
# Test to see if the minion was accepted
@@ -605,8 +621,26 @@ if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then
else
add_sensoroni_to_minion
fi
create$NODETYPE
echo "Minion file created for $MINION_ID"
if [[ "$OPERATION" == 'add' ]]; then
# tell the minion to populate the mine with data from mine_functions which is populated during setup
# this only needs to happen on non managers since they handle this during setup
# and they need to wait for ca creation to update the mine
updateMine
checkMine "network.ip_addrs"
# apply the elasticsearch state to the manager if a new searchnode was added
if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then
# calls so-common and set_minionid sets MINIONID to local minion id
set_minionid
salt $MINIONID state.apply elasticsearch queue=True --async
salt $MINIONID state.apply soc queue=True --async
fi
# run this async so the cli doesn't wait for a return
salt "$MINION_ID" state.highstate --async queue=True
fi
fi
if [[ "$OPERATION" = 'test' ]]; then

View File

@@ -235,8 +235,8 @@ function updatePassword() {
# Update DB with new hash
echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB), created_at=datetime('now'), updated_at=datetime('now') where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='password');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
# Deactivate MFA
echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath"
[[ $? != 0 ]] && fail "Unable to update password"
fi
}
@@ -341,14 +341,19 @@ function syncElastic() {
" and ic.identity_id=i.id " \
" and ict.id=ic.identity_credential_type_id " \
" and ict.name='password' " \
" and instr(ic.config, 'hashed_password') " \
" and i.state == 'active' " \
"order by ici.identifier;" | \
sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath")
[[ $? != 0 ]] && fail "Unable to read credential hashes from database"
echo "${userData}" | \
jq -r '.user + ":" + .data.hashed_password' \
>> "$usersTmpFile"
user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password')
if lookup_salt_value "licensed_features" "" "pillar" | grep -x oidc; then
# generate random placeholder salt/hash for users without passwords
random_crypt=$(get_random_value 53)
user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/")
fi
echo "${user_data_formatted}" >> "$usersTmpFile"
# Append the user roles
while IFS="" read -r rolePair || [ -n "$rolePair" ]; do

View File

@@ -0,0 +1,95 @@
#!/usr/bin/env python3
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
import os
import sys
import time
import yaml
lockFile = "/tmp/so-yaml.lock"
def showUsage(args):
    """Print the command usage text to stdout, then terminate with status 1.

    The ``args`` parameter is unused; it exists so this function can sit in
    the command-dispatch table alongside the real subcommand handlers.
    """
    usage_lines = [
        'Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]),
        '  General commands:',
        '    remove      - Removes a yaml top-level key, if it exists. Requires KEY arg.',
        '    help        - Prints this usage information.',
        '',
        '  Where:',
        '    YAML_FILE   - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml',
        '    KEY         - Top level key only, does not support dot-notations for nested keys at this time. Ex: level1',
    ]
    print('\n'.join(usage_lines))
    sys.exit(1)
def loadYaml(filename):
    """Parse a YAML file and return its content as Python objects.

    Args:
        filename: Path of the YAML file to read.

    Returns:
        The parsed document (typically a dict), or None for an empty file.
    """
    # Context manager guarantees the handle is closed; the original
    # implementation leaked the open file object.
    with open(filename, "r") as file:
        return yaml.safe_load(file.read())
def writeYaml(filename, content):
    """Serialize ``content`` as YAML, overwriting ``filename``.

    Args:
        filename: Destination path (truncated before writing).
        content: Python object to serialize.

    Returns:
        The return value of yaml.dump (None when writing to a stream).
    """
    # Context manager guarantees the handle is flushed and closed; the
    # original implementation leaked the open file object.
    with open(filename, "w") as file:
        return yaml.dump(content, file)
def remove(args):
    """Remove a top-level key from a YAML file, if present.

    Args:
        args: Two-element list: [yaml_file_path, top_level_key].

    Returns:
        0 on success. On bad arguments, prints an error and delegates to
        showUsage(), which exits the process with status 1.
    """
    if len(args) != 2:
        print('Missing filename or key arg', file=sys.stderr)
        showUsage(None)
        return

    filename = args[0]
    content = loadYaml(filename)
    # An empty YAML file parses to None; treat that as an empty mapping so
    # removal is a harmless no-op instead of an AttributeError on .pop().
    if content is None:
        content = {}
    content.pop(args[1], None)
    writeYaml(filename, content)
    return 0
def main():
    """Dispatch the requested subcommand while holding a best-effort file lock.

    The lock file serializes concurrent invocations of this tool. If the lock
    cannot be acquired after maxAttempts tries, the command proceeds anyway
    (best-effort), but in that case the foreign lock file is left alone.
    """
    args = sys.argv[1:]
    if len(args) < 1:
        showUsage(None)
        return

    commands = {
        "help": showUsage,
        "remove": remove,
    }

    code = 1
    acquired = False  # True only if *this* process created the lock file
    try:
        lockAttempts = 0
        maxAttempts = 30
        while lockAttempts < maxAttempts:
            lockAttempts = lockAttempts + 1
            try:
                # "x" mode fails if the file already exists, giving an atomic
                # create-or-fail lock primitive.
                f = open(lockFile, "x")
                f.close()
                acquired = True
                break
            except Exception:
                if lockAttempts == 1:
                    print("Waiting for lock file to be released from another process...")
                time.sleep(2)
        if not acquired:
            print("Lock file (" + lockFile + ") could not be created; proceeding without lock.")

        cmd = commands.get(args[0], showUsage)
        code = cmd(args[1:])
    finally:
        # Only delete the lock if we created it. The previous implementation
        # removed the file unconditionally, which could delete a lock that was
        # legitimately held by another still-running process.
        if acquired and os.path.exists(lockFile):
            os.remove(lockFile)
    sys.exit(code)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,77 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
from io import StringIO
import sys
from unittest.mock import patch, MagicMock
import unittest
import importlib
soyaml = importlib.import_module("so-yaml")
class TestRemove(unittest.TestCase):
    """Unit tests for the so-yaml command-line tool.

    Fixes over the previous revision:
      - assertIn argument order was reversed (member and container swapped),
        which made the string assertions vacuously true.
      - Usage text is printed to stdout, not stderr, so stdout is patched for
        the usage checks; stderr is patched only for the error-message check.
      - The lock-file fixture previously did `file.write = "..."` (attribute
        assignment, not a call) and leaked the handle; it now writes and
        closes properly, and cleans up the lock file afterwards.
    """

    def test_main_missing_input(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd"]
                soyaml.main()
                sysmock.assert_called_once_with(1)
                self.assertIn("Usage:", mock_stdout.getvalue())

    def test_main_help_locked(self):
        import os
        filename = "/tmp/so-yaml.lock"
        with open(filename, "w") as file:
            file.write("fake lock file")
        try:
            with patch('sys.exit', new=MagicMock()) as sysmock:
                with patch('sys.stdout', new=StringIO()) as mock_stdout:
                    with patch('time.sleep', new=MagicMock()) as mock_sleep:
                        sys.argv = ["cmd", "help"]
                        soyaml.main()
                        sysmock.assert_called()
                        mock_sleep.assert_called_with(2)
                        self.assertIn("Usage:", mock_stdout.getvalue())
        finally:
            # Ensure the stale lock never leaks into subsequent tests.
            if os.path.exists(filename):
                os.remove(filename)

    def test_main_help(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stdout', new=StringIO()) as mock_stdout:
                sys.argv = ["cmd", "help"]
                soyaml.main()
                sysmock.assert_called()
                self.assertIn("Usage:", mock_stdout.getvalue())

    def test_remove(self):
        filename = "/tmp/so-yaml_test-remove.yaml"
        with open(filename, "w") as file:
            file.write("{key1: { child1: 123, child2: abc }, key2: false}")
        soyaml.remove([filename, "key1"])
        with open(filename, "r") as file:
            actual = file.read()
        self.assertEqual(actual, "key2: false\n")

    def test_remove_missing_args(self):
        with patch('sys.exit', new=MagicMock()) as sysmock:
            with patch('sys.stderr', new=StringIO()) as mock_stderr:
                filename = "/tmp/so-yaml_test-remove.yaml"
                with open(filename, "w") as file:
                    file.write("{key1: { child1: 123, child2: abc }, key2: false}")
                soyaml.remove([filename])
                with open(filename, "r") as file:
                    actual = file.read()
                # File must be untouched when arguments are missing.
                self.assertEqual(actual, "{key1: { child1: 123, child2: abc }, key2: false}")
                sysmock.assert_called_once_with(1)
                self.assertIn("Missing filename or key arg", mock_stderr.getvalue())

View File

@@ -403,6 +403,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5
[[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10
[[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20
[[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30
true
}
@@ -414,7 +415,8 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4
[[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5
[[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20
[[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30
true
}
@@ -429,8 +431,7 @@ post_to_2.4.4() {
}
post_to_2.4.5() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
echo "Nothing to apply"
POSTVERSION=2.4.5
}
@@ -446,6 +447,19 @@ post_to_2.4.20() {
POSTVERSION=2.4.20
}
post_to_2.4.30() {
echo "Regenerating Elastic Agent Installers"
/sbin/so-elastic-agent-gen-installers
salt-call state.apply ca queue=True
stop_salt_minion
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
salt-call state.apply nginx queue=True
enable_highstate
POSTVERSION=2.4.30
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -460,7 +474,6 @@ stop_salt_master() {
echo ""
echo "Killing any queued Salt jobs on the manager."
pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1
set -e
echo ""
echo "Storing salt-master pid."
@@ -468,6 +481,7 @@ stop_salt_master() {
echo "Found salt-master PID $MASTERPID"
systemctl_func "stop" "salt-master"
timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option."
set -e
}
stop_salt_minion() {
@@ -480,14 +494,12 @@ stop_salt_minion() {
echo ""
echo "Killing Salt jobs on this node."
salt-call saltutil.kill_all_jobs --local
set -e
echo "Storing salt-minion pid."
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1)
echo "Found salt-minion PID $MINIONPID"
systemctl_func "stop" "salt-minion"
set +e
timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion
set -e
}
@@ -506,7 +518,7 @@ up_to_2.4.4() {
}
up_to_2.4.5() {
determine_elastic_agent_upgrade
echo "Nothing to do for 2.4.5"
INSTALLEDVERSION=2.4.5
}
@@ -523,6 +535,23 @@ up_to_2.4.20() {
INSTALLEDVERSION=2.4.20
}
up_to_2.4.30() {
# Remove older defend integration json & installed integration
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
. $UPDATE_DIR/salt/elasticfleet/tools/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
rm -f /opt/so/state/eaintegrations.txt
# Elastic Update for this release, so download Elastic Agent files
determine_elastic_agent_upgrade
rm -f /opt/so/state/estemplates*.txt
INSTALLEDVERSION=2.4.30
}
determine_elastic_agent_upgrade() {
if [[ $is_airgap -eq 0 ]]; then
update_elastic_agent_airgap
@@ -568,7 +597,7 @@ update_airgap_rules() {
rsync -av $UPDATE_DIR/agrules/* /nsm/repo/rules/
}
update_centos_repo() {
update_airgap_repo() {
# Update the files in the repo
echo "Syncing new updates to /nsm/repo"
rsync -av $AGREPO/* /nsm/repo/
@@ -578,9 +607,9 @@ update_centos_repo() {
}
update_salt_mine() {
echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host."
echo "Populating the mine with mine_functions for each host."
set +e
salt \* cmd.run cmd='MAININT=$(salt-call pillar.get host:mainint --out=newline_values_only) && salt-call mine.send name=network.ip_addrs interface="$MAININT"'
salt \* mine.update -b 50
set -e
}
@@ -620,6 +649,7 @@ upgrade_check_salt() {
if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then
echo "You are already running the correct version of Salt for Security Onion."
else
echo "Salt needs to be upgraded to $NEWSALTVERSION."
UPGRADESALT=1
fi
}
@@ -628,22 +658,48 @@ upgrade_salt() {
SALTUPGRADED=True
echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
echo ""
# If CentOS
if [[ $OS == 'centos' ]]; then
# If rhel family
if [[ $is_rpm ]]; then
echo "Removing yum versionlock for Salt."
echo ""
yum versionlock delete "salt-*"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# if oracle run with -r to ignore repos set by bootstrap
if [[ $OS == 'oracle' ]]; then
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
# if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos
else
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
fi
set -e
echo "Applying yum versionlock for Salt."
echo ""
yum versionlock add "salt-*"
# Else do Ubuntu things
elif [[ $is_deb ]]; then
echo "Removing apt hold for Salt."
echo ""
apt-mark unhold "salt-common"
apt-mark unhold "salt-master"
apt-mark unhold "salt-minion"
echo "Updating Salt packages."
echo ""
set +e
run_check_net_err \
"sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \
"Could not update salt, please check $SOUP_LOG for details."
set -e
echo "Applying apt hold for Salt."
echo ""
apt-mark hold "salt-common"
apt-mark hold "salt-master"
apt-mark hold "salt-minion"
fi
echo "Checking if Salt was upgraded."
@@ -655,7 +711,7 @@ upgrade_salt() {
echo "Once the issue is resolved, run soup again."
echo "Exiting."
echo ""
exit 0
exit 1
else
echo "Salt upgrade success."
echo ""
@@ -691,13 +747,31 @@ verify_latest_update_script() {
# Keeping this block in case we need to do a hotfix that requires salt update
apply_hotfix() {
# if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then
# fix_wazuh
# elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then
# 2_3_10_hotfix_1
# else
if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then
salt-call state.apply elasticfleet -l info queue=True
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
/usr/sbin/so-elastic-fleet-integration-policy-elastic-defend
elif [[ "$INSTALLEDVERSION" == "2.4.30" ]] ; then
if [[ -f /etc/pki/managerssl.key.old ]]; then
echo "Skipping Certificate Generation"
else
rm -f /opt/so/conf/elastic-fleet/integrations/endpoints-initial/elastic-defend-endpoints.json
so-kibana-restart --force
so-kibana-api-check
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints
rm -f /opt/so/state/eaintegrations.txt
salt-call state.apply ca queue=True
stop_salt_minion
mv /etc/pki/managerssl.crt /etc/pki/managerssl.crt.old
mv /etc/pki/managerssl.key /etc/pki/managerssl.key.old
systemctl_func "start" "salt-minion"
fi
else
echo "No actions required. ($INSTALLEDVERSION/$HOTFIXVERSION)"
# fi
fi
}
@@ -733,14 +807,8 @@ main() {
echo ""
set_os
if ! check_salt_master_status; then
echo "Could not talk to salt master"
echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "SOUP will now attempt to start the salt-master service and exit."
exit 1
fi
echo "This node can communicate with the salt-master."
check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master."
echo "Checking to see if this is a manager."
echo ""
@@ -786,11 +854,13 @@ main() {
set -e
if [[ $is_airgap -eq 0 ]]; then
yum clean all
update_airgap_repo
dnf clean all
check_os_updates
elif [[ $OS == 'oel' ]]; then
elif [[ $OS == 'oracle' ]]; then
# sync remote repo down to local if not airgap
repo_sync
dnf clean all
check_os_updates
fi
@@ -805,7 +875,8 @@ main() {
echo "Hotfix applied"
update_version
enable_highstate
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
else
echo ""
echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION."
@@ -826,7 +897,7 @@ main() {
else
update_registry
set +e
update_docker_containers "soup" "" "" "$SOUP_LOG"
update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG"
set -e
fi
@@ -841,6 +912,14 @@ main() {
echo "Upgrading Salt"
# Update the repo files so it can actually upgrade
upgrade_salt
# for Debian based distro, we need to stop salt again after upgrade output below is from bootstrap-salt
# * WARN: Not starting daemons on Debian based distributions
# is not working mostly because starting them is the default behaviour.
if [[ $is_deb ]]; then
stop_salt_minion
stop_salt_master
fi
fi
preupgrade_changes
@@ -851,11 +930,6 @@ main() {
update_airgap_rules
fi
# Only update the repo if its airgap
if [[ $is_airgap -eq 0 && $UPGRADESALT -ne 1 ]]; then
update_centos_repo
fi
# since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars
if [[ ! "$MINIONID" =~ "_import" ]]; then
echo ""
@@ -878,7 +952,7 @@ main() {
# Testing that salt-master is up by checking that is it connected to itself
set +e
echo "Waiting on the Salt Master service to be ready."
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
# update the salt-minion configs here and start the minion
@@ -903,7 +977,8 @@ main() {
echo ""
echo "Running a highstate. This could take several minutes."
set +e
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
set -e
stop_salt_master
@@ -914,11 +989,12 @@ main() {
set +e
echo "Waiting on the Salt Master service to be ready."
salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details."
check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details."
set -e
echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes."
salt-call state.highstate -l info queue=True
(wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG"
highstate
postupgrade_changes
[[ $is_airgap -eq 0 ]] && unmount_update

View File

@@ -0,0 +1,96 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# so-elastic-fleet-reset: destructively removes the current Elastic Fleet
# install (agents, pillar config, Fleet indices, Fleet data streams) and then
# re-runs Fleet setup. This is a jinja-templated script; GLOBALS is rendered
# by Salt before execution.
{% from 'vars/globals.map.jinja' import GLOBALS %}
. /usr/sbin/so-common
require_manager
# Inform user we are about to remove Elastic Fleet data
echo
echo "This script will remove the current Elastic Fleet install and all of its data and then rerun Elastic Fleet setup."
echo "This includes data previously ingested with Fleet such as Zeek and Suricata logs."
echo "Deployed Elastic Agents will no longer be enrolled and will need to be reinstalled."
echo "This script should only be used as a last resort to reinstall Elastic Fleet."
echo
echo "If you would like to proceed, type AGREE and hit ENTER."
echo
# Read user input
# NOTE(review): 'read -r INPUT' would avoid backslash interpretation in the reply.
read INPUT
# Case-insensitive confirmation: ${INPUT^^} upper-cases the reply.
if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi
status "Uninstalling all Elastic Agents on all Grid Nodes..."
salt \* cmd.run "elastic-agent uninstall -f" queue=True
status "Stopping Fleet Container..."
so-elastic-fleet-stop --force
status "Deleting Fleet Data from Pillars..."
# Drop the per-minion elasticfleet pillar key and the global enrollment tokens.
so-yaml.py remove /opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls elasticfleet
sed -i "/fleet_grid_enrollment_token_general.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls
sed -i "/fleet_grid_enrollment_token_heavy.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls
status "Deleting Elastic Fleet data..."
# Check to make sure that Elasticsearch is up & ready
RETURN_CODE=0
wait_for_web_response "https://localhost:9200/_cat/indices/.kibana*" "green open" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
status "Elasticsearch not accessible, exiting script..."
exit 1
fi
# Fleet system aliases whose backing indices will be deleted outright.
ALIASES=".fleet-servers .fleet-policies-leader .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest"
for ALIAS in ${ALIASES}
do
# Get all concrete indices from alias
# NOTE(review): these two curl calls hit the Elasticsearch API (port 9200) but
# use the Kibana curl.config credentials — confirm those credentials are valid
# against Elasticsearch.
INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]')
# Delete all resolved indices
for INDX in ${INDXS}
do
status "Deleting $INDX"
curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
done
done
status "Deleting Fleet-related Data Streams..."
# NOTE(review): the commas sit outside the quotes, so DATASTREAMS is the single
# string "logs-suricata-so,logs-kratos-so,logs-soc-so,logs-zeek-so" and jq sends
# one comma-joined entry in the dataStreams array — confirm the Kibana API
# accepts this comma-delimited form.
DATASTREAMS="logs-suricata-so","logs-kratos-so","logs-soc-so","logs-zeek-so"
JSON_STRING=$( jq -n \
--arg DATASTREAMLIST "$DATASTREAMS" \
'{"dataStreams":[$DATASTREAMLIST]}'
)
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/index_management/delete_data_streams" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
status "Restarting Kibana..."
so-kibana-restart --force
status "Checking to make sure that Kibana API is up & ready..."
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
status "Kibana API not accessible, exiting script..."
exit 1
fi
status "Removing Integrations State File..."
# Removing the state file forces the integrations to be re-applied by setup.
rm -f /opt/so/state/eaintegrations.txt
status "Starting Elastic Fleet Setup..."
so-elastic-fleet-setup
status "Re-installing Elastic Agent on all Grid Nodes..."
salt \* state.apply elasticfleet.install_agent_grid queue=True
status "Elastic Fleet Reset complete...."

View File

@@ -147,7 +147,7 @@ http {
proxy_set_header X-Forwarded-Proto $scheme;
}
location ~ ^/auth/.*?(login) {
location ~ ^/auth/.*?(login|oidc/callback/) {
rewrite /auth/(.*) /$1 break;
limit_req zone=auth_throttle burst={{ NGINXMERGED.config.throttle_login_burst }} nodelay;
limit_req_status 429;

View File

@@ -41,7 +41,7 @@ pcap_sbin:
- file_mode: 755
{% if PCAPBPF %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %}
{% set BPF_COMPILED = ",\\\"--filter=" + BPF_CALC['stdout'] + "\\\"" %}
{% else %}

View File

@@ -1,28 +0,0 @@
# -*- coding: utf-8 -*-
import logging
from time import sleep
from os import remove
log = logging.getLogger(__name__)
def start(interval=30):
    """Salt engine loop: periodically verify that this minion's CA certificate
    is present in the Salt mine, repopulating the mine when it is missing.

    Args:
        interval: Seconds to sleep between checks.
    """
    log.info("checkmine engine started")
    minionid = __grains__['id']
    while True:
        try:
            # The assignment is only a probe: a lookup failure (e.g. KeyError)
            # means the CA entry is missing from the mine.
            ca_crt = __salt__['saltutil.runner']('mine.get', tgt=minionid, fun='x509.get_pem_entries')[minionid]['/etc/pki/ca.crt']
            log.info('Successfully queried Salt mine for the CA.')
        except Exception:
            # Previously a bare 'except:', which would also swallow SystemExit
            # and KeyboardInterrupt and prevent a clean engine shutdown.
            log.error('Could not pull CA from the Salt mine.')
            log.info('Removing /var/cache/salt/master/minions/%s/mine.p to force Salt mine to be repopulated.' % minionid)
            try:
                remove('/var/cache/salt/master/minions/%s/mine.p' % minionid)
                log.info('Removed /var/cache/salt/master/minions/%s/mine.p' % minionid)
            except FileNotFoundError:
                log.error('/var/cache/salt/master/minions/%s/mine.p does not exist' % minionid)
            __salt__['mine.send'](name='x509.get_pem_entries', glob_path='/etc/pki/ca.crt')
            log.warning('Salt mine repopulated with /etc/pki/ca.crt')
        sleep(interval)

View File

@@ -0,0 +1,90 @@
# -*- coding: utf-8 -*-
import logging
from time import sleep
import os
import salt.client
log = logging.getLogger(__name__)
local = salt.client.LocalClient()
def start(interval=60):
    """Salt engine loop (runs on the salt-master as of 2.4.30): every
    ``interval`` seconds, inspect each alive minion's mine cache and repair it
    when it is missing, corrupt, or holds stale/invalid entries.

    Args:
        interval: Seconds to sleep between sweeps.
    """
    def mine_delete(minion, func):
        # Delete a single mine function's cached data for the given minion.
        log.warning('checkmine engine: deleting mine function %s for %s' % (func, minion))
        local.cmd(minion, 'mine.delete', [func])

    def mine_flush(minion):
        # Drop the minion's entire mine cache.
        log.warning('checkmine engine: flushing mine cache for %s' % minion)
        local.cmd(minion, 'mine.flush')

    def mine_update(minion):
        # Ask the minion to re-run its mine functions and repopulate the cache.
        log.warning('checkmine engine: updating mine cache for %s' % minion)
        local.cmd(minion, 'mine.update')

    log.info("checkmine engine: started")
    cachedir = __opts__['cachedir']
    while True:
        log.debug('checkmine engine: checking which minions are alive')
        manage_alived = __salt__['saltutil.runner']('manage.alived', show_ip=False)
        log.debug('checkmine engine: alive minions: %s' % ' , '.join(manage_alived))
        for minion in manage_alived:
            mine_path = os.path.join(cachedir, 'minions', minion, 'mine.p')
            # it is possible that a minion is alive, but hasn't created a mine file yet
            try:
                mine_size = os.path.getsize(mine_path)
                log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size))
                # For some reason the mine file can be corrupt and only be 1 byte in size
                if mine_size == 1:
                    log.error('checkmine engine: found %s to be 1 byte' % mine_path)
                    mine_flush(minion)
                    mine_update(minion)
                    continue
            except FileNotFoundError:
                log.warning('checkmine engine: minion: %s %s does not exist' % (minion, mine_path))
                mine_flush(minion)
                mine_update(minion)
                continue
            # if this is a manager-type minion, check that the CA is in the mine and is correct
            if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']:
                x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries')
                try:
                    ca_crt = x509[minion]['/etc/pki/ca.crt']
                    log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt))
                    # since the cert is defined, make sure it is valid
                    # Local import keeps the x509_v2 module optional at engine load time.
                    import salt.modules.x509_v2 as x509_v2
                    if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'):
                        log.error('checkmine engine: found minion %s does\'t have a valid ca_crt in the mine' % (minion))
                        log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt))
                        mine_delete(minion, 'x509.get_pem_entries')
                        mine_update(minion)
                        continue
                    else:
                        log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion))
                # NOTE(review): x509[minion][...] is a dict lookup, which raises
                # KeyError (handled below), so this IndexError branch looks
                # unreachable here — confirm the mine.get return shape.
                except IndexError:
                    log.error('checkmine engine: found minion %s does\'t have a ca_crt in the mine' % (minion))
                    mine_delete(minion, 'x509.get_pem_entries')
                    mine_update(minion)
                    continue
                except KeyError:
                    log.error('checkmine engine: found minion %s is not in the mine' % (minion))
                    mine_flush(minion)
                    mine_update(minion)
                    continue
            # Update the mine if the ip in the mine doesn't match returned from manage.alived
            network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs')
            try:
                mine_ip = network_ip_addrs[minion][0]
                log.debug('checkmine engine: found minion %s has mine_ip: %s' % (minion, mine_ip))
            except IndexError:
                # Minion present in the mine but its ip_addrs list is empty.
                log.error('checkmine engine: found minion %s does\'t have a mine_ip' % (minion))
                mine_delete(minion, 'network.ip_addrs')
                mine_update(minion)
            except KeyError:
                log.error('checkmine engine: found minion %s is not in the mine' % (minion))
                mine_flush(minion)
                mine_update(minion)
        sleep(interval)

View File

@@ -1,4 +1,8 @@
mine_interval: 35
mine_interval: 25
mine_functions:
network.ip_addrs:
- interface: {{ GLOBALS.main_interface }}
- interface: {{ pillar.host.mainint }}
{% if grains.role in ['so-eval','so-import','so-manager','so-managersearch','so-standalone'] -%}
x509.get_pem_entries:
- glob_path: '/etc/pki/ca.crt'
{% endif -%}

View File

@@ -3,4 +3,4 @@ engines_dirs:
engines:
- checkmine:
interval: 30
interval: 60

View File

@@ -5,25 +5,21 @@
{% set SPLITCHAR = '+' %}
{% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/modules' %}
{% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %}
{% else %}
{% set SPLITCHAR = '-' %}
{% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %}
{% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %}
{% set SALT_STATE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/states' %}
{% set SALT_MODULE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/modules' %}
{% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %}
{% endif %}
{% set INSTALLEDSALTVERSION = grains.saltversion %}
{% if grains.saltversion|string != SALTVERSION|string %}
{% if grains.os|lower in ['Rocky', 'redhat', 'CentOS Stream'] %}
{% if grains.os_family|lower == 'redhat' %}
{% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% elif grains.os_family|lower == 'debian' %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %}
{% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %}
{% endif %}
{% else %}
{% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %}

View File

@@ -2,4 +2,4 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
master:
version: 3006.1
version: 3006.3

View File

@@ -12,25 +12,34 @@ hold_salt_master_package:
- name: salt-master
{% endif %}
salt_master_service:
service.running:
- name: salt-master
- enable: True
# prior to 2.4.30 this engine ran on the manager with salt-minion
# this has changed to running with the salt-master in 2.4.30
remove_engines_config:
file.absent:
- name: /etc/salt/minion.d/engines.conf
- source: salt://salt/files/engines.conf
- watch_in:
- service: salt_minion_service
checkmine_engine:
file.managed:
- name: /etc/salt/engines/checkmine.py
- source: salt://salt/engines/checkmine.py
- source: salt://salt/engines/master/checkmine.py
- makedirs: True
- watch_in:
- service: salt_minion_service
engines_config:
file.managed:
- name: /etc/salt/minion.d/engines.conf
- name: /etc/salt/master.d/engines.conf
- source: salt://salt/files/engines.conf
- watch_in:
- service: salt_minion_service
salt_master_service:
service.running:
- name: salt-master
- enable: True
- watch:
- file: checkmine_engine
- file: engines_config
- order: last
{% else %}
@@ -38,4 +47,4 @@ engines_config:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
{% endif %}

View File

@@ -0,0 +1,13 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# This state was separated from the salt.minion state because it is applied during setup,
# when GLOBALS (imported by salt.minion) is not yet available.
# The salt.minion state includes this state, so it also runs on every normal apply.
mine_functions:
  file.managed:
    - name: /etc/salt/minion.d/mine_functions.conf
    - source: salt://salt/etc/minion.d/mine_functions.conf.jinja
    - template: jinja

View File

@@ -2,6 +2,6 @@
# When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions
salt:
minion:
version: 3006.1
version: 3006.3
check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default
service_start_delay: 30 # in seconds.

View File

@@ -12,6 +12,7 @@ include:
- salt
- systemd.reload
- repo.client
- salt.mine_functions
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
@@ -66,6 +67,9 @@ set_log_levels:
- "log_level: info"
- "log_level_logfile: info"
# prior to 2.4.30 this managed file would restart the salt-minion service when updated
# since this file is currently only adding a sleep timer on service start
# it is not required to restart the service
salt_minion_service_unit_file:
file.managed:
- name: {{ SYSTEMD_UNIT_FILE }}
@@ -78,14 +82,6 @@ salt_minion_service_unit_file:
{% endif %}
mine_functions:
file.managed:
- name: /etc/salt/minion.d/mine_functions.conf
- source: salt://salt/etc/minion.d/mine_functions.conf.jinja
- template: jinja
- defaults:
GLOBALS: {{ GLOBALS }}
# this has to be outside the if statement above since there are <requisite>_in calls to this state
salt_minion_service:
service.running:
@@ -96,6 +92,5 @@ salt_minion_service:
- file: mine_functions
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
- file: set_log_levels
- file: salt_minion_service_unit_file
{% endif %}
- order: last

File diff suppressed because it is too large Load Diff

View File

@@ -3,23 +3,6 @@
COMMAND=$1
SENSORONI_CONTAINER=${SENSORONI_CONTAINER:-so-sensoroni}
function ci() {
HOME_DIR=$(dirname "$0")
TARGET_DIR=${1:-.}
PATH=$PATH:/usr/local/bin
if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
echo "Missing dependencies. Consider running the following command:"
echo " python -m pip install flake8 pytest pytest-cov"
exit 1
fi
pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
}
function download() {
ANALYZERS=$1
if [[ $ANALYZERS = "all" ]]; then
@@ -36,5 +19,5 @@ function download() {
if [[ "$COMMAND" == "download" ]]; then
download "$2"
else
ci
../../../../pyci.sh $@
fi

View File

@@ -0,0 +1,10 @@
# Malware Hash Registry
## Description
Search Team Cymru's Malware Hash Registry for a file hash.
## Configuration Requirements
None.
**NOTE:** If you try to run the Malware Hash Registry analyzer but it results in a "Name or service not known" error, then it may be a DNS issue. Folks using 8.8.4.4 or 8.8.8.8 as their DNS resolver have reported this issue. A potential workaround is to switch to another DNS resolver like 1.1.1.1.

View File

@@ -3,5 +3,5 @@ post_setup_cron:
- name: 'PATH=$PATH:/usr/sbin salt-call state.highstate'
- identifier: post_setup_cron
- user: root
- minute: '*/1'
- minute: '*/5'
- identifier: post_setup_cron

View File

@@ -13,11 +13,13 @@
{% do SOCDEFAULTS.soc.config.server.modules[module].update({'hostUrl': application_url}) %}
{% endfor %}
{# add nodes from the logstash:nodes pillar to soc.server.modules.elastic.remoteHostUrls #}
{# add all grid heavy nodes to soc.server.modules.elastic.remoteHostUrls #}
{% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %}
{% for m in minions.keys() %}
{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
{% endfor %}
{% if node_type in ['heavynode'] %}
{% for m in minions.keys() %}
{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %}
{% endfor %}
{% endif %}
{% endfor %}
{% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}

View File

@@ -59,6 +59,12 @@ soc:
target: _blank
links:
- 'https://www.virustotal.com/gui/search/{value}'
- name: Sublime Platform Email Review
description: Review email in Sublime Platform
icon: fa-external-link-alt
target: _blank
links:
- 'https://{:sublime.url}/messages/{:sublime.message_group_id}'
eventFields:
default:
- soc_timestamp

View File

@@ -67,10 +67,10 @@ function manage_minion() {
response=$(so-minion "-o=$op" "-m=$minion_id")
exit_code=$?
if [[ exit_code -eq 0 ]]; then
log "Successful command execution"
log "Successful '$op' command execution on $minion_id"
respond "$id" "true"
else
log "Unsuccessful command execution: $response ($exit_code)"
log "Unsuccessful '$op' command execution on $minion_id: $response ($exit_code)"
respond "$id" "false"
fi
}

View File

@@ -52,6 +52,8 @@ so-soctopus:
- {{ XTRAENV }}
{% endfor %}
{% endif %}
- watch:
- file: /opt/so/conf/soctopus/SOCtopus.conf
- require:
- file: soctopusconf
- file: navigatordefaultlayer

View File

@@ -129,7 +129,7 @@ surithresholding:
# BPF compilation and configuration
{% if SURICATABPF %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %}
{% set BPF_STATUS = 1 %}
{% else %}

View File

@@ -280,7 +280,7 @@ suricata:
mqtt:
enabled: 'no'
http2:
enabled: 'no'
enabled: 'yes'
asn1-max-frames: 256
run-as:
user: suricata

View File

@@ -152,7 +152,7 @@ plcronscript:
# BPF compilation and configuration
{% if ZEEKBPF %}
{% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
{% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %}
{% if BPF_CALC['stderr'] == "" %}
{% set BPF_STATUS = 1 %}
{% else %}

View File

@@ -49,12 +49,13 @@ zeek:
- frameworks/files/hash-all-files
- frameworks/files/detect-MHR
- policy/frameworks/notice/extend-email/hostnames
- policy/frameworks/notice/community-id
- policy/protocols/conn/community-id-logging
- ja3
- hassh
- intel
- cve-2020-0601
- securityonion/bpfconf
- securityonion/communityid
- securityonion/file-extraction
- oui-logging
- icsnpp-modbus
@@ -75,7 +76,7 @@ zeek:
- LogAscii::use_json = T;
- CaptureLoss::watch_interval = 5 mins;
networks:
HOME_NET:
HOME_NET:
- 192.168.0.0/16
- 10.0.0.0/8
- 172.16.0.0/12
@@ -120,4 +121,4 @@ zeek:
- stats
- stderr
- stdout

View File

@@ -268,15 +268,6 @@ collect_dockernet() {
fi
}
collect_es_space_limit() {
whiptail_log_size_limit "$log_size_limit"
while ! valid_int "$log_size_limit"; do # Upper/lower bounds?
whiptail_invalid_input
whiptail_log_size_limit "$log_size_limit"
done
}
collect_gateway() {
whiptail_management_interface_gateway
@@ -286,28 +277,6 @@ collect_gateway() {
done
}
collect_homenet_mngr() {
whiptail_homenet_manager "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12"
while ! valid_cidr_list "$HNMANAGER"; do
whiptail_invalid_input
whiptail_homenet_manager "$HNMANAGER"
done
}
collect_homenet_snsr() {
if whiptail_homenet_sensor_inherit; then
export HNSENSOR=inherit
else
whiptail_homenet_sensor "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12"
while ! valid_cidr_list "$HNSENSOR"; do
whiptail_invalid_input
whiptail_homenet_sensor "$HNSENSOR"
done
fi
}
collect_hostname() {
collect_hostname_validate
@@ -346,26 +315,6 @@ collect_idh_preferences() {
if [[ "$idh_preferences" != "" ]]; then IDH_MGTRESTRICT='True'; fi
}
collect_idh_services() {
whiptail_idh_services
case "$IDH_SERVICES" in
'Linux Webserver (NAS Skin)')
IDH_SERVICES='"HTTP","FTP","SSH"'
;;
'MySQL Server')
IDH_SERVICES='"MYSQL","SSH"'
;;
'MSSQL Server')
IDH_SERVICES='"MSSQL","VNC'
;;
'Custom')
whiptail_idh_services_custom
IDH_SERVICES=$(echo "$IDH_SERVICES" | tr '[:blank:]' ',' )
;;
esac
}
collect_int_ip_mask() {
whiptail_management_interface_ip_mask
@@ -425,71 +374,6 @@ collect_net_method() {
fi
}
collect_ntp_servers() {
if whiptail_ntp_ask; then
[[ $is_airgap ]] && ntp_string=""
whiptail_ntp_servers "$ntp_string"
while ! valid_ntp_list "$ntp_string"; do
whiptail_invalid_input
whiptail_ntp_servers "$ntp_string"
done
IFS="," read -r -a ntp_servers <<< "$ntp_string" # Split string on commas into array
else
ntp_servers=()
fi
}
collect_oinkcode() {
whiptail_oinkcode
while ! valid_string "$OINKCODE" "" "128"; do
whiptail_invalid_input
whiptail_oinkcode "$OINKCODE"
done
}
collect_patch_schedule() {
whiptail_patch_schedule
case "$patch_schedule" in
'New Schedule')
whiptail_patch_schedule_select_days
whiptail_patch_schedule_select_hours
collect_patch_schedule_name_new
patch_schedule_os_new
;;
'Import Schedule')
collect_patch_schedule_name_import
;;
'Automatic')
PATCHSCHEDULENAME='auto'
;;
'Manual')
PATCHSCHEDULENAME='manual'
;;
esac
}
collect_patch_schedule_name_new() {
whiptail_patch_name_new_schedule
while ! valid_string "$PATCHSCHEDULENAME"; do
whiptail_invalid_string "schedule name"
whiptail_patch_name_new_schedule "$PATCHSCHEDULENAME"
done
}
collect_patch_schedule_name_import() {
whiptail_patch_schedule_import
while ! valid_string "$PATCHSCHEDULENAME"; do
whiptail_invalid_string "schedule name"
whiptail_patch_schedule_import "$PATCHSCHEDULENAME"
done
}
collect_proxy() {
[[ -n $TESTING ]] && return
local ask=${1:-true}
@@ -649,8 +533,8 @@ configure_minion() {
"log_level_logfile: info"\
"log_file: /opt/so/log/salt/minion" >> "$minion_config"
cp -f ../salt/salt/etc/minion.d/mine_functions.conf.jinja /etc/salt/minion.d/mine_functions.conf
sed -i "s/{{ GLOBALS.main_interface }}/$MNIC/" /etc/salt/minion.d/mine_functions.conf
info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "$MNIC"}}'"
salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="{'host': {'mainint': $MNIC}}"
{
logCmd "systemctl enable salt-minion";
@@ -658,47 +542,6 @@ configure_minion() {
} >> "$setup_log" 2>&1
}
configure_ntp() {
local chrony_conf=/etc/chrony.conf
# Install chrony if it isn't already installed
if ! command -v chronyc &> /dev/null; then
logCmd "dnf -y install chrony"
fi
[[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak"
printf '%s\n' "# NTP server list" > $chrony_conf
# Build list of servers
for addr in "${ntp_servers[@]}"; do
echo "server $addr iburst" >> $chrony_conf
done
printf '\n%s\n' "# Config options" >> $chrony_conf
printf '%s\n' \
'driftfile /var/lib/chrony/drift' \
'makestep 1.0 3' \
'rtcsync' \
'logdir /var/log/chrony' >> $chrony_conf
if [[ $is_rpm ]]; then
systemctl enable chronyd
systemctl restart chronyd
else
systemctl enable chrony
systemctl restart chrony
fi
# Tell the chrony daemon to sync time & update the system time
# Since these commands only make a call to chronyd, wait after each command to make sure the changes are made
printf "Syncing chrony time to server: "
chronyc -a 'burst 4/4' && sleep 30
printf "Forcing chrony to update the time: "
chronyc -a makestep && sleep 30
}
checkin_at_boot() {
local minion_config=/etc/salt/minion
@@ -719,7 +562,7 @@ check_requirements() {
req_cores=4
req_nics=2
elif [[ $is_standalone ]]; then
req_mem=24
req_mem=16
req_cores=4
req_nics=2
elif [[ $is_manager ]]; then
@@ -743,7 +586,7 @@ check_requirements() {
req_cores=4
req_nics=1
elif [[ $is_heavynode ]]; then
req_mem=24
req_mem=16
req_cores=4
req_nics=2
elif [[ $is_idh ]]; then
@@ -808,6 +651,17 @@ check_requirements() {
if [[ $total_mem_hr -lt $req_mem ]]; then
whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB"
if [[ $is_standalone || $is_heavynode ]]; then
echo "This install type will fail with less than $req_mem GB of memory. Exiting setup."
exit 0
fi
fi
if [[ $is_standalone || $is_heavynode ]]; then
if [[ $total_mem_hr -gt 15 && $total_mem_hr -lt 24 ]]; then
low_mem=true
else
low_mem=false
fi
fi
}
@@ -1055,16 +909,6 @@ download_elastic_agent_artifacts() {
fi
}
installer_progress_loop() {
local i=0
local msg="${1:-Performing background actions...}"
while true; do
[[ $i -lt 98 ]] && ((i++))
set_progress_str "$i" "$msg" nolog
[[ $i -gt 0 ]] && sleep 5s
done
}
installer_prereq_packages() {
if [[ $is_deb ]]; then
# Print message to stdout so the user knows setup is doing something
@@ -1123,9 +967,7 @@ docker_seed_registry() {
if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then
if [ "$install_type" == 'IMPORT' ]; then
container_list 'so-import'
elif [ "$install_type" == 'HELIXSENSOR' ]; then
container_list 'so-helix'
container_list 'so-import'
else
container_list
fi
@@ -1258,7 +1100,7 @@ generate_ssl() {
# if the install type is a manager then we need to wait for the minion to be ready before trying
# to run the ssl state since we need the minion to sign the certs
if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then
wait_for_salt_minion
(wait_for_salt_minion "$MINION_ID" "5" '/dev/stdout' || fail_setup) 2>&1 | tee -a "$setup_log"
fi
info "Applying SSL state"
logCmd "salt-call state.apply ssl -l info"
@@ -1384,7 +1226,7 @@ ls_heapsize() {
fi
case "$install_type" in
'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE')
'MANAGERSEARCH' | 'HEAVYNODE' | 'STANDALONE')
LS_HEAP_SIZE='1000m'
;;
'EVAL')
@@ -1648,21 +1490,6 @@ network_setup() {
logCmd "sed -i '/\$MNIC/${INTERFACE}/g' /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable"
}
ntp_pillar_entries() {
local pillar_file=$local_salt_dir/pillar/minions/$MINION_ID.sls
if [[ ${#ntp_servers[@]} -gt 0 ]]; then
printf '%s\n'\
"ntp:"\
" servers:" > "$pillar_file"
for addr in "${ntp_servers[@]}"; do
printf '%s\n' " - '$addr'" >> "$pillar_file"
done
fi
}
parse_install_username() {
# parse out the install username so things copy correctly
INSTALLUSERNAME=${SUDO_USER:-${USER}}
@@ -1881,7 +1708,11 @@ drop_install_options() {
echo "INTERFACE=$INTERFACE" >> /opt/so/install.txt
NODETYPE=${install_type^^}
echo "NODETYPE=$NODETYPE" >> /opt/so/install.txt
echo "CORECOUNT=$lb_procs" >> /opt/so/install.txt
if [[ $low_mem == "true" ]]; then
echo "CORECOUNT=1" >> /opt/so/install.txt
else
echo "CORECOUNT=$lb_procs" >> /opt/so/install.txt
fi
echo "LSHOSTNAME=$HOSTNAME" >> /opt/so/install.txt
echo "LSHEAP=$LS_HEAP_SIZE" >> /opt/so/install.txt
echo "CPUCORES=$num_cpu_cores" >> /opt/so/install.txt
@@ -1972,6 +1803,7 @@ securityonion_repo() {
}
repo_sync_local() {
SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //')
info "Repo Sync"
if [[ $is_supported ]]; then
# Sync the repo from the the SO repo locally.
@@ -2021,7 +1853,7 @@ repo_sync_local() {
curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo
rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub
dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
curl -fsSL https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/3006.1.repo | tee /etc/yum.repos.d/salt.repo
curl -fsSL "https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/$SALTVERSION.repo" | tee /etc/yum.repos.d/salt.repo
dnf repolist
curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install
else
@@ -2111,11 +1943,6 @@ saltify() {
}
# Run a salt command to generate the minion key
salt_firstcheckin() {
salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the ouput
}
salt_install_module_deps() {
logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/"
logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/"
@@ -2498,10 +2325,6 @@ wait_for_file() {
return 1
}
wait_for_salt_minion() {
retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup
}
verify_setup() {
info "Verifying setup"
set -o pipefail

View File

@@ -91,7 +91,7 @@ fi
# if packages are updated and the box isn't rebooted
if [[ $is_debian ]]; then
update_packages
if [[ -f "/var/run/reboot-required" ]]; then
if [[ -f "/var/run/reboot-required" ]] && [ -z "$test_profile" ]; then
whiptail_debian_reboot_required
reboot
fi
@@ -676,7 +676,11 @@ if ! [[ -f $install_opt_file ]]; then
export MAINIP=$MAINIP
export PATCHSCHEDULENAME=$PATCHSCHEDULENAME
export INTERFACE=$INTERFACE
export CORECOUNT=$lb_procs
if [[ $low_mem == "true" ]]; then
export CORECOUNT=1
else
export CORECOUNT=$lb_procs
fi
export LSHOSTNAME=$HOSTNAME
export LSHEAP=$LS_HEAP_SIZE
export CPUCORES=$num_cpu_cores
@@ -714,6 +718,17 @@ if ! [[ -f $install_opt_file ]]; then
logCmd "salt-call state.apply common.packages"
logCmd "salt-call state.apply common"
# this will apply the salt.minion state first since salt.master includes salt.minion
logCmd "salt-call state.apply salt.master"
# wait here until we get a response from the salt-master since it may have just restarted
# exit setup after 5-6 minutes of trying
check_salt_master_status || fail "Can't access salt master or it is not ready"
# apply the ca state to create the ca and put it in the mine early in the install
# the minion ip will already be in the mine from configure_minion function in so-functions
generate_ca
# this will also call the ssl state since docker requires the intca
# the salt-minion service will need to be up on the manager to sign requests
generate_ssl
logCmd "salt-call state.apply docker"
firewall_generate_templates
set_initial_firewall_policy
@@ -721,8 +736,6 @@ if ! [[ -f $install_opt_file ]]; then
title "Downloading Elastic Agent Artifacts"
download_elastic_agent_artifacts
generate_ca
generate_ssl
logCmd "salt-call state.apply -l info firewall"
# create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf
@@ -759,8 +772,11 @@ if ! [[ -f $install_opt_file ]]; then
info "Restarting SOC to pick up initial user"
logCmd "so-soc-restart"
title "Setting up Elastic Fleet"
logCmd "salt-call state.apply elasticfleet.config"
logCmd "so-elastic-fleet-setup"
logCmd "salt-call state.apply elasticfleet.config"
if ! logCmd so-elastic-fleet-setup; then
error "Failed to run so-elastic-fleet-setup"
fail_setup
fi
if [[ ! $is_import ]]; then
title "Setting up Playbook"
logCmd "so-playbook-reset"
@@ -768,8 +784,6 @@ if ! [[ -f $install_opt_file ]]; then
checkin_at_boot
set_initial_firewall_access
logCmd "salt-call schedule.enable -linfo --local"
systemctl restart salt-master
systemctl restart salt-minion
verify_setup
else
touch /root/accept_changes

View File

@@ -25,7 +25,8 @@ log_has_errors() {
# Ignore salt mast cached public key and minion failed to auth because this is a test
# to see if the salt key had already been accepted.
# Ignore failed to connect to ::1 since we have most curls wrapped in a retry.
# Ignore failed to connect to since we have most curls wrapped in a retry and there are
# multiple mirrors available.
# Ignore perl-Error- since that is the name of a Perl package SO installs.
@@ -35,11 +36,15 @@ log_has_errors() {
# This is ignored for Ubuntu
# Failed to restart snapd.mounts-pre.target: Operation refused, unit snapd.mounts-pre.target
# may be requested by dependency only (it is configured to refuse manual start/stop).
# Command failed with exit code is output during retry loops.
# "remove failed" is caused by a warning generated by upgrade of libwbclient
grep -E "FAILED|Failed|failed|ERROR|Result: False|Error is not recoverable" "$setup_log" | \
grep -vE "The Salt Master has cached the public key for this node" | \
grep -vE "Minion failed to authenticate with the master" | \
grep -vE "Failed to connect to ::1" | \
grep -vE "Failed to connect to " | \
grep -vE "Failed to set locale" | \
grep -vE "perl-Error-" | \
grep -vE "Failed:\s*?[0-9]+" | \
@@ -50,15 +55,20 @@ log_has_errors() {
grep -vE "code: 100" | \
grep -vE "/nsm/rules/sigma*" | \
grep -vE "/nsm/rules/yara*" | \
grep -vE "remove failed" | \
grep -vE "Failed to restart snapd" | \
grep -vE "Login Failed Details" | \
grep -vE "response from daemon: unauthorized" | \
grep -vE "Reading first line of patchfile" | \
grep -vE "Command failed with exit code" | \
grep -vE "Running scope as unit" &> "$error_log"
if [[ $? -eq 0 ]]; then
# This function succeeds (returns 0) if errors are detected
return 0
fi
# No errors found, return 1 (function failed to find errors)
return 1
}
@@ -117,7 +127,10 @@ main() {
echo "WARNING: Failed setup a while ago"
exit_code=1
elif log_has_errors; then
echo "WARNING: Errors detected during setup"
echo "WARNING: Errors detected during setup."
echo "--------- ERRORS ---------"
cat $error_log
echo "--------------------------"
exit_code=1
touch /root/failure
elif using_iso && cron_error_in_mail_spool; then

Binary file not shown.

Binary file not shown.

Binary file not shown.

Some files were not shown because too many files have changed in this diff Show More