Compare commits

..

293 Commits

Author SHA1 Message Date
Mike Reeves c2d43e5d22 Merge pull request #13255 from Security-Onion-Solutions/2.4/dev
2.4.80
2024-06-25 15:28:13 -04:00
Mike Reeves 51bb4837f5 Merge pull request #13259 from Security-Onion-Solutions/TOoSmOotH-patch-5
Update .gitleaks.toml
2024-06-25 14:48:41 -04:00
Mike Reeves caec424e44 Update .gitleaks.toml 2024-06-25 14:47:50 -04:00
Mike Reeves 156176c628 Merge pull request #13256 from Security-Onion-Solutions/fixmain
Fix git
2024-06-25 08:30:19 -04:00
Mike Reeves 81b4c4e2c0 Merge branch '2.4/main' of github.com:Security-Onion-Solutions/securityonion into fixmain 2024-06-25 08:24:27 -04:00
Mike Reeves d4107dc60a Merge pull request #13254 from Security-Onion-Solutions/2.4.80
2.4.80
2024-06-25 08:17:59 -04:00
Mike Reeves d34605a512 Update DOWNLOAD_AND_VERIFY_ISO.md 2024-06-25 08:16:31 -04:00
Mike Reeves af5e7cd72c 2.4.80 2024-06-24 15:41:47 -04:00
Jorge Reyes 93378e92e6 Merge pull request #13253 from Security-Onion-Solutions/kafkaflt
Remove unused sbin_jinja for kafka
2024-06-24 14:18:32 -04:00
reyesj2 81ce762250 delete commented block
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-24 14:06:48 -04:00
reyesj2 cb727bf48d remove unused sbin_jinja from kafka config
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-24 13:45:13 -04:00
Jorge Reyes 9a0bad88cc Merge pull request #13251 from Security-Onion-Solutions/kafkaflt
FIX: update firewall defaults
2024-06-24 12:29:48 -04:00
reyesj2 680e84851b Re-add manager sbin_jinja file recurse
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-24 12:27:52 -04:00
reyesj2 ea771ed21b update firewall
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-24 12:01:01 -04:00
reyesj2 c332cd777c remove import/heavynode artifact caused by kafka cert not existing but being bound in docker. (empty dir created)
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-24 08:50:37 -04:00
Mike Reeves 9fce85c988 Merge pull request #13245 from Security-Onion-Solutions/proxysoup
Fix soup for proxy servers
2024-06-21 16:13:02 -04:00
weslambert 6141c7a849 Merge pull request #13246 from Security-Onion-Solutions/fix/detections_license_none
Add option for detections without a license
2024-06-21 15:59:09 -04:00
weslambert bf91030204 Add option for detections without license 2024-06-21 15:33:11 -04:00
Mike Reeves 9577c3f59d Make soup use reposync from the repo 2024-06-21 15:24:54 -04:00
Mike Reeves 77dedc575e Make soup use reposync from the repo 2024-06-21 15:20:07 -04:00
Mike Reeves 0295b8d658 Make soup use reposync from the repo 2024-06-21 15:11:23 -04:00
Mike Reeves 6a9d78fa7c Make soup use reposync from the repo 2024-06-21 15:10:44 -04:00
Mike Reeves b84521cdd2 Make soup use reposync from the repo 2024-06-21 14:49:16 -04:00
Mike Reeves ff4679ec08 Make soup use reposync from the repo 2024-06-21 14:45:06 -04:00
Mike Reeves c5ce7102e8 Make soup use reposync from the repo 2024-06-21 14:41:27 -04:00
Mike Reeves 70c001e22b Update so-repo-sync 2024-06-21 13:37:36 -04:00
Mike Reeves f1dc22a200 Merge pull request #13244 from Security-Onion-Solutions/TOoSmOotH-patch-4
Update soc_manager.yaml
2024-06-21 12:36:17 -04:00
Mike Reeves aae1b69093 Update soc_manager.yaml 2024-06-21 12:35:01 -04:00
Jorge Reyes 8781419b4a Merge pull request #13242 from Security-Onion-Solutions/annotupd
update kafka annotations
2024-06-20 16:18:40 -04:00
reyesj2 2eea671857 more precise wording in kafka annotation
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-20 16:16:55 -04:00
reyesj2 73acfbf864 update kafka annotations
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-20 16:02:45 -04:00
Doug Burks ae0e994461 Merge pull request #13239 from Security-Onion-Solutions/dougburks-patch-1
Update defaults.yaml to put Process actions in logical order
2024-06-20 10:12:06 -04:00
Doug Burks 07b9011636 Update defaults.yaml to put Process actions in logical order 2024-06-20 10:09:27 -04:00
Matthew Wright bc2b3b7f8f Merge pull request #13236 from Security-Onion-Solutions/mwright/licenseDropdown
Added license presets to defaults.yaml file
2024-06-18 18:05:15 -04:00
unknown ea02a2b868 Added license presets to defaults.yaml file 2024-06-18 16:52:00 -04:00
Jorge Reyes ba3a6cbe87 Merge pull request #13234 from Security-Onion-Solutions/reyesj2-patch-4
update receiver node allowed states
2024-06-18 15:55:32 -04:00
reyesj2 268dcbe00b update receiver node allowed states
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-18 15:44:51 -04:00
Josh Patterson 6be97f13d0 Merge pull request #13233 from Security-Onion-Solutions/minefunc
fix ca mine_function
2024-06-18 13:58:35 -04:00
Jorge Reyes 95d6c93a07 Merge pull request #13231 from Security-Onion-Solutions/kfeval 2024-06-18 13:15:18 -04:00
m0duspwnens a2bb220043 fix x509 mine_function 2024-06-18 12:33:33 -04:00
reyesj2 911d6dcce1 update kafka output policy only on eligible grid types
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-18 12:09:59 -04:00
Doug Burks 5f6a9850eb Merge pull request #13227 from Security-Onion-Solutions/dougburks-patch-1
FEATURE: Add new Process actions #13226
2024-06-18 10:57:52 -04:00
Doug Burks de18bf06c3 FEATURE: Add new Process actions #13226 2024-06-18 10:36:41 -04:00
Jorge Reyes 73473d671d Merge pull request #13222 from Security-Onion-Solutions/reyesj2-patch-3
update profile
2024-06-18 09:16:35 -04:00
Josh Brower 3fbab7c3af Merge pull request #13223 from Security-Onion-Solutions/2.4/timeout
Update defaults
2024-06-18 08:55:30 -04:00
DefensiveDepth 521cccaed6 Update defaults 2024-06-18 08:43:00 -04:00
reyesj2 35da3408dc update profile
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-17 15:53:49 -04:00
Jorge Reyes c03096e806 Merge pull request #13221 from Security-Onion-Solutions/reyesj2/ksoup
suppress fleet policy update in soup
2024-06-17 14:18:34 -04:00
reyesj2 2afc947d6c suppress fleet policy update in soup
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-17 14:12:33 -04:00
Doug Burks 076da649cf Merge pull request #13217 from Security-Onion-Solutions/dougburks-patch-1
FEATURE: Add more links and descriptions to SOC MOTD #13216
2024-06-17 12:18:29 -04:00
Doug Burks 93ced0959c FEATURE: Add more links and descriptions to SOC MOTD #13216 2024-06-17 09:25:01 -04:00
Doug Burks 6f13fa50bf FEATURE: Add more links and descriptions to SOC MOTD #13216 2024-06-17 09:24:32 -04:00
Doug Burks 3bface12e0 FEATURE: Add more links and descriptions to SOC MOTD #13216 2024-06-17 09:23:14 -04:00
Doug Burks b584c8e353 FEATURE: Add more links and descriptions to SOC MOTD #13216 2024-06-17 09:13:17 -04:00
Jason Ertel 6caf87df2d Merge pull request #13209 from Security-Onion-Solutions/kfix
Fix errors on new installs
2024-06-15 05:09:48 -04:00
reyesj2 4d1f2c2bc1 fix kafka elastic fleet output policy setup
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-14 23:04:08 -04:00
reyesj2 0b1175b46c kafka logstash input plugin handle empty brokers list
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-14 23:03:36 -04:00
reyesj2 4e50dabc56 refix typos
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-14 23:03:06 -04:00
Jason Ertel ce45a5926a Merge pull request #13207 from Security-Onion-Solutions/kaffix
Standalone logstash error
2024-06-14 18:01:35 -04:00
Josh Brower c540a4f257 Merge pull request #13208 from Security-Onion-Solutions/2.4/ruletemplates
Update rule templates
2024-06-14 16:01:26 -04:00
DefensiveDepth 7af94c172f Change spelling 2024-06-14 16:00:22 -04:00
DefensiveDepth 7556587e35 Update rule templates 2024-06-14 15:47:57 -04:00
reyesj2 a0030b27e2 add additional retries to elasticfleet scripts
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-14 15:34:40 -04:00
reyesj2 8080e05444 on fresh install kafka nodes pillar may not have populated. Avoiding this by only generating kafka input pipeline when kafka nodes pillar is not empty
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-14 14:17:26 -04:00
Josh Brower af11879545 Merge pull request #13205 from Security-Onion-Solutions/2.4/customsuricatasources
Initial support for custom suricata urls and local rulesets
2024-06-14 13:50:06 -04:00
DefensiveDepth c89f1c9d95 remove multiline 2024-06-14 13:48:55 -04:00
DefensiveDepth b7ac599a42 set to empty 2024-06-14 13:21:36 -04:00
DefensiveDepth 8363877c66 move to custom rules 2024-06-14 12:41:44 -04:00
DefensiveDepth 4bcb4b5b9c removed unneeded import 2024-06-14 09:32:34 -04:00
DefensiveDepth 68302e14b9 add to defaults and tweaks 2024-06-14 09:28:23 -04:00
DefensiveDepth c1abc7a7f1 Update description 2024-06-14 08:51:34 -04:00
DefensiveDepth 484717d57d initial support for custom suricata urls and local rulesets 2024-06-14 08:42:10 -04:00
Jorge Reyes b91c608fcf Merge pull request #13204 from Security-Onion-Solutions/kaffix
Only comment out so-kafka from so-status when it exists & only run en…
2024-06-13 15:54:50 -04:00
reyesj2 8f8ece2b34 Only comment out so-kafka from so-status when it exists & only run ensure_default_pipeline when Kafka is configured
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-13 15:50:34 -04:00
Jorge Reyes 9b5c1c01e9 Merge pull request #13200 from Security-Onion-Solutions/kafka/fix 2024-06-13 12:26:57 -04:00
reyesj2 816a1d446e Generate kafka-logstash cert on standalone,manager,managersearch in addition to searchnodes.
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-13 12:18:13 -04:00
reyesj2 19bfd5beca fix kafka nodeid assignment to increment correctly
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-13 12:16:39 -04:00
Jorge Reyes 9ac7e051b3 Merge pull request #13190 from Security-Onion-Solutions/reyesj2/kafka
Initial Kafka support
2024-06-13 09:42:59 -04:00
reyesj2 80b1d51f76 wrong location for global.pipeline check
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-13 08:50:53 -04:00
Doug Burks 6340ebb36d Merge pull request #13197 from Security-Onion-Solutions/dougburks-patch-1
Update DOWNLOAD_AND_VERIFY_ISO.md
2024-06-12 16:49:21 -04:00
Doug Burks 70721afa51 Update DOWNLOAD_AND_VERIFY_ISO.md 2024-06-12 16:47:26 -04:00
reyesj2 9c31622598 telegraf should only include jolokia config when Kafka is set as the global.pipeline
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-12 15:42:00 -04:00
reyesj2 f372b0907b Use kafka:password for kafka certs
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-12 15:41:10 -04:00
coreyogburn fac96e0b08 Merge pull request #13183 from Security-Onion-Solutions/cogburn/cleanup-config
Fix unnecessary escaping
2024-06-12 11:57:31 -06:00
reyesj2 2bc53f9868 Merge remote-tracking branch 'remotes/origin/2.4/dev' into reyesj2/kafka 2024-06-12 12:36:58 -04:00
reyesj2 e8106befe9 Append '-securityonion' to all Security Onion related Kafka topics. Adjust logstash to ingest all topics ending in '-securityonion' to avoid having to manually list topic names
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-12 12:05:16 -04:00
reyesj2 83412b813f Renamed Kafka pillar
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-12 11:19:25 -04:00
reyesj2 b56d497543 Revert a so-setup change. Kafka is not an installable option
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-12 11:17:06 -04:00
reyesj2 dd40962288 Revert a whiptail menu change. Kafka is not an install option
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-12 11:07:23 -04:00
reyesj2 b7eebad2a5 Update Kafka self reset & add initial Kafka wrapper scripts to build out
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-12 11:01:40 -04:00
Josh Patterson 092f716f12 Merge pull request #13189 from Security-Onion-Solutions/soupmsgq
remove this \n
2024-06-12 10:41:49 -04:00
m0duspwnens c38f48c7f2 remove this \n 2024-06-12 10:34:32 -04:00
Corey Ogburn d5ef0e5744 Fix unnecessary escaping 2024-06-11 12:34:32 -06:00
Josh Brower e90557d7dc Merge pull request #13179 from Security-Onion-Solutions/2.4/fixintegritycheck
Add new bind - suricata all.rules
2024-06-11 13:08:40 -04:00
reyesj2 628893fd5b remove redundant 'kafka_' from annotations & defaults
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-11 11:56:21 -04:00
reyesj2 a81e4c3362 remove dash(-) from kafka.id
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-11 11:55:17 -04:00
reyesj2 ca7b89c308 Added Kafka reset to SOC UI. In case of changing an active broker to a controller, topics may become unavailable. Resolving this would require manual intervention. This option allows running a reset to start from a clean slate to then configure the cluster to the desired state before re-enabling Kafka as the global pipeline.
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-11 11:21:13 -04:00
Josh Patterson 03335cc015 Merge pull request #13182 from Security-Onion-Solutions/dockerup
upgrade docker
2024-06-11 11:08:40 -04:00
reyesj2 08557ae287 kafka.id field should only be present when metadata for kafka exists
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-11 11:01:34 -04:00
DefensiveDepth 08d2a6242d Add new bind - suricata all.rules 2024-06-11 10:03:33 -04:00
m0duspwnens 4b481bd405 add epoch to docker for oracle 2024-06-11 09:41:58 -04:00
m0duspwnens 0b1e3b2a7f upgrade docker for focal 2024-06-10 16:24:44 -04:00
m0duspwnens dbd9873450 upgrade docker for jammy 2024-06-10 16:04:11 -04:00
m0duspwnens c6d0a17669 docker upgrade debian 12 2024-06-10 15:43:29 -04:00
m0duspwnens adeab10f6d upgrade docker and containerd.io for oracle 2024-06-10 12:14:27 -04:00
reyesj2 824f852ed7 merge 2.4/dev
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-10 11:26:23 -04:00
reyesj2 284c1be85f Update Kafka controller(s) via SOC UI
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-10 11:08:54 -04:00
Jason Ertel 7ad6baf483 Merge pull request #13171 from Security-Onion-Solutions/jertel/yaml
correct placement of error check override
2024-06-08 08:21:20 -04:00
Jason Ertel f1638faa3a correct placement of error check override 2024-06-08 08:18:34 -04:00
Jason Ertel dea786abfa Merge pull request #13170 from Security-Onion-Solutions/jertel/yaml
gracefully handle missing parent key
2024-06-08 07:49:49 -04:00
Jason Ertel f96b82b112 gracefully handle missing parent key 2024-06-08 07:44:46 -04:00
Josh Patterson 95fe11c6b4 Merge pull request #13162 from Security-Onion-Solutions/soupmsgq
fix elastic templates not loading due to global_override phases
2024-06-07 16:23:03 -04:00
Jason Ertel f2f688b9b8 Update soup 2024-06-07 16:18:09 -04:00
m0duspwnens 0139e18271 additional description 2024-06-07 16:03:21 -04:00
Mike Reeves 657995d744 Merge pull request #13165 from Security-Onion-Solutions/TOoSmOotH-patch-3
Update defaults.yaml
2024-06-07 15:38:01 -04:00
Mike Reeves 4057238185 Update defaults.yaml 2024-06-07 15:33:49 -04:00
coreyogburn fb07ff65c9 Merge pull request #13164 from Security-Onion-Solutions/cogburn/tls-options
AdditionalCA and InsecureSkipVerify
2024-06-07 13:10:45 -06:00
Mike Reeves dbc56ffee7 Update defaults.yaml 2024-06-07 15:09:09 -04:00
Corey Ogburn ee696be51d Remove rootCA and insecureSkipVerify from SOC defaults 2024-06-07 13:07:04 -06:00
Corey Ogburn 5d3fd3d389 AdditionalCA and InsecureSkipVerify
New fields have been added to manager and then duplicated over to SOC's config in the same vein as how proxy was updated earlier this week.

AdditionalCA holds the PEM formatted public keys that should be trusted when making requests. It has been implemented for both Sigma's zip downloads and Sigma and Suricata's repository clones and pulls.

InsecureSkipVerify has been added to help our users troubleshoot their configuration. Setting it to true will not verify the cert on outgoing requests. Self signed, missing, or invalid certs will not throw an error.
2024-06-07 12:47:09 -06:00
Corey Ogburn fa063722e1 RootCA and InsecureSkipVerify
New empty settings and their annotations.
2024-06-07 09:10:14 -06:00
m0duspwnens f5cc35509b fix output alignment 2024-06-07 11:03:26 -04:00
m0duspwnens d39c8fae54 format output 2024-06-07 09:01:16 -04:00
m0duspwnens d3b81babec check for phases with so-yaml, remove if exists 2024-06-06 16:15:21 -04:00
coreyogburn f35f6bd4c8 Merge pull request #13154 from Security-Onion-Solutions/cogburn/soc-proxy
SOC Proxy Setting
2024-06-06 14:03:16 -06:00
Mike Reeves d5cfef94a3 Merge pull request #13156 from Security-Onion-Solutions/TOoSmOotH-patch-3 2024-06-06 16:01:22 -04:00
Mike Reeves f37f5ba97b Update soc_suricata.yaml 2024-06-06 15:57:58 -04:00
Corey Ogburn 42818a9950 Remove proxy from SOC defaults 2024-06-06 13:28:07 -06:00
Corey Ogburn e85c3e5b27 SOC Proxy Setting
The so_proxy value we build during install is now copied to SOC's config.
2024-06-06 11:55:27 -06:00
m0duspwnens a39c88c7b4 add set to troubleshoot failure 2024-06-06 12:56:24 -04:00
m0duspwnens 73ebf5256a Merge remote-tracking branch 'origin/2.4/dev' into soupmsgq 2024-06-06 12:44:45 -04:00
Jason Ertel 6d31cd2a41 Merge pull request #13150 from Security-Onion-Solutions/jertel/yaml
add ability to retrieve yaml values via so-yaml.py; improve so-minion id matching
2024-06-06 12:09:03 -04:00
Jason Ertel 5600fed9c4 add ability to retrieve yaml values via so-yaml.py; improve so-minion id matching 2024-06-06 11:56:07 -04:00
m0duspwnens 6920b77b4a fix msg 2024-06-06 11:00:43 -04:00
m0duspwnens ccd6b3914c add final msg queue for soup. 2024-06-06 10:33:55 -04:00
reyesj2 c4723263a4 Remove unused kafka reactor
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-06 08:59:17 -04:00
reyesj2 4581a46529 Merge remote-tracking branch 'remotes/origin/2.4/dev' into reyesj2/kafka 2024-06-05 20:47:41 -04:00
Josh Patterson 33a2c5dcd8 Merge pull request #13141 from Security-Onion-Solutions/sotcprp
move so-tcpreplay from common state to sensor state
2024-06-05 09:49:39 -04:00
m0duspwnens f6a8a21f94 remove space 2024-06-05 08:58:46 -04:00
m0duspwnens ff5773c837 move so-tcpreplay back to common. return empty string if no sensor.interface pillar 2024-06-05 08:56:32 -04:00
m0duspwnens 66f8084916 Merge remote-tracking branch 'origin/2.4/dev' into sotcprp 2024-06-05 08:32:54 -04:00
m0duspwnens a2467d0418 move so-tcpreplay to sensor state 2024-06-05 08:24:57 -04:00
reyesj2 3b0339a9b3 create kafka.id from kafka {partition}-{offset}-{timestamp} for tracking event
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-04 14:27:52 -04:00
reyesj2 fb1d4fdd3c update license
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-04 12:33:51 -04:00
Josh Patterson 56a16539ae Merge pull request #13134 from Security-Onion-Solutions/sotcprp
so-tcpreplay now runs if manager is offline
2024-06-04 10:43:33 -04:00
m0duspwnens c0b2cf7388 add the curlys 2024-06-04 10:28:21 -04:00
reyesj2 d9c58d9333 update receiver pillar access
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-04 08:33:45 -04:00
Josh Patterson ef3a52468f Merge pull request #13129 from Security-Onion-Solutions/salt3006.8
salt 3006.6
2024-06-03 15:29:19 -04:00
m0duspwnens c88b731793 revert to 3006.6 2024-06-03 15:27:08 -04:00
reyesj2 2e85a28c02 Remove so-kafka-clusterid script, created during soup
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-06-02 18:25:59 -04:00
weslambert 964fef1aab Merge pull request #13117 from Security-Onion-Solutions/fix/items_and_lists
Add templates for .items and .lists indices
2024-05-31 16:34:29 -04:00
reyesj2 1a832fa0a5 Move soup kafka needfuls to up_to_2.4.80
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-31 14:04:46 -04:00
reyesj2 75bdc92bbf Merge remote-tracking branch 'remotes/origin/2.4/dev' into reyesj2/kafka 2024-05-31 14:02:43 -04:00
Wes a8c231ad8c Add component templates 2024-05-31 17:47:01 +00:00
Wes f396247838 Add index templates and lifecycle policies 2024-05-31 17:46:19 +00:00
reyesj2 e3ea4776c7 Update kafka nodes pillar before running highstate with pillarwatch engine. This allows configuring your Kafka controllers before cluster comes up for the first time
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-31 13:34:28 -04:00
coreyogburn 37a928b065 Merge pull request #13107 from Security-Onion-Solutions/cogburn/detection-templates
Added TemplateDetections To Detection ClientParams
2024-05-30 16:26:17 -06:00
Corey Ogburn 85c269e697 Added TemplateDetections To Detection ClientParams
The UI can now insert templates when you select a Detection language. These are those templates, annotated.
2024-05-30 15:59:03 -06:00
m0duspwnens 6e70268ab9 Merge remote-tracking branch 'origin/2.4/dev' into sotcprp 2024-05-30 16:34:37 -04:00
Josh Patterson fb8929ea37 Merge pull request #13103 from Security-Onion-Solutions/salt3006.8
Salt3006.8
2024-05-30 16:32:05 -04:00
weslambert 5d9c0dd8b5 Merge pull request #13101 from Security-Onion-Solutions/fix/separate_suricata
Separate Suricata alerts into a specific data stream
2024-05-30 16:30:55 -04:00
m0duspwnens debf093c54 Merge remote-tracking branch 'origin/2.4/dev' into salt3006.8 2024-05-30 15:58:10 -04:00
reyesj2 00b5a5cc0c Revert "revert version for soup test before 2.4.80 pipeline unpaused"
This reverts commit 48713a4e7b.
2024-05-30 15:13:16 -04:00
reyesj2 dbb99d0367 Remove bad config
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-30 15:10:15 -04:00
m0duspwnens 7702f05756 upgrade salt 3006.8. soup for 2.4.80 2024-05-30 15:00:32 -04:00
Wes 2c635bce62 Set index for Suricata alerts 2024-05-30 17:02:31 +00:00
reyesj2 48713a4e7b revert version for soup test before 2.4.80 pipeline unpaused
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-30 13:00:34 -04:00
Wes e831354401 Add Suricata alerts setting for configuration 2024-05-30 17:00:11 +00:00
Wes 55c5ea5c4c Add template for Suricata alerts 2024-05-30 16:58:56 +00:00
reyesj2 1fd5165079 Merge remote-tracking branch 'origin/2.4/dev' into reyesj2/kafka
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-29 23:37:40 -04:00
reyesj2 949cea95f4 Update pillarWatch config for global.pipeline
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-29 23:19:44 -04:00
Mike Reeves 12762e08ef Merge pull request #13093 from Security-Onion-Solutions/TOoSmOotH-patch-1
Update VERSION
2024-05-29 16:54:31 -04:00
Mike Reeves 62bdb2627a Update VERSION 2024-05-29 16:53:27 -04:00
reyesj2 386be4e746 WIP: Manage Kafka nodes pillar role value
This way when kafka_controllers is updated the pillar value gets updated and any non-controllers get updated to revert to 'broker' only role.
 Needs more testing when a new controller joins in this manner Kafka errors due to cluster metadata being out of sync. One solution is to remove /nsm/kafka/data/__cluster_metadata-0/quorum-state and restart cluster. Alternative is working with Kafka cli tools to inform cluster of new voter, likely best option but requires a wrapper script of some sort to be created for updating cluster in-place.
Easiest option is to have all receivers join grid and then configure Kafka with specific controllers via SOC UI prior to enabling Kafka. This way Kafka cluster comes up in the desired configuration with no need for immediately modifying cluster

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-29 16:48:39 -04:00
reyesj2 d9ec556061 Update some annotations and defaults
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-29 16:41:02 -04:00
reyesj2 876d860488 elastic agent should be able to communicate over 9092 for sending logs to kafka brokers
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-29 16:40:15 -04:00
reyesj2 59097070ef Revert "Remove unneeded jolokia aggregate metrics to reduce data ingested to influx"
This reverts commit 1c1a1a1d3f.
2024-05-28 12:17:43 -04:00
reyesj2 77b5aa4369 Correct dashboard name
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-28 11:34:35 -04:00
reyesj2 0d7c331ff0 only show specific fields when hovering over Kafka influxdb panels
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-28 11:29:38 -04:00
reyesj2 1c1a1a1d3f Remove unneeded jolokia aggregate metrics to reduce data ingested to influx
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-28 11:14:19 -04:00
reyesj2 47efcfd6e2 Add basic Kafka metrics to 'Security Onion Performance' influxdb dashboard
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-28 10:55:11 -04:00
reyesj2 15a0b959aa Add jolokia metrics for influxdb dashboard
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-28 10:51:39 -04:00
reyesj2 fcb6a47e8c Remove redis.sh telegraf script when Kafka is global pipeline
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-26 21:10:41 -04:00
m0duspwnens b5f656ae58 dont render pillar each time so-tcpreplay runs 2024-05-23 13:22:22 -04:00
reyesj2 382cd24a57 Small changes needed for using new Kafka docker image + added Kafka logging output to /opt/so/log/kafka/
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-22 13:39:21 -04:00
reyesj2 b1beb617b3 Logstash should be disabled when Kafka is enabled except when a minion override exists OR node is a standalone
- Standalone subscribes to Kafka topics via logstash for ingest

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-22 13:38:09 -04:00
reyesj2 91f8b1fef7 Set default replication factor back to Kafka default
If replication factor is > 1 Kafka will fail to start until another broker is added
  - For internal automated testing purposes a Standalone will be utilized

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-22 13:35:09 -04:00
reyesj2 2ad87bf1fe merge 2.4/dev
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-08 16:30:45 -04:00
reyesj2 eca2a4a9c8 Logstash consumer threads should match topic partition count
- Default is set to 3. If there are too many consumer threads it may lead to idle logstash worker threads and could require decreasing this value to saturate workers

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-08 16:17:09 -04:00
reyesj2 dff609d829 Add basic read-only metric collection from Kafka
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-08 16:13:09 -04:00
reyesj2 e960ae66a3 Merge remote-tracking branch 'remotes/origin/2.4/dev' into reyesj2/kafka 2024-05-02 15:12:27 -04:00
reyesj2 093cbc5ebc Reconfigure Kafka defaults
- Set default number of partitions per topic -> 3. Helps ensure that out of the box we can take advantage of multi-node Kafka clusters via load balancing across at least 3 brokers. Also multiple searchnodes will be able to pull from each topic. In this case 3 searchnodes (consumers) would be able to pull from all topics concurrently.
- Set default replication factor -> 2. This is the minimum value required for redundancy. Every partition will have 1 replica. In this case if we have 2 brokers each topic will have 3 partitions (load balanced across brokers) and each partition will have a replica on separate broker for redundancy

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-02 15:10:13 -04:00
reyesj2 f663ef8c16 Setup Kafka to use PKCS12 and remove need for converting to JKS
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-02 14:53:28 -04:00
reyesj2 de9f6425f9 Automatically switch between Kafka output policy and logstash output policy when globals.pipeline changes
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-02 12:13:46 -04:00
reyesj2 47ced60243 Create new Kafka output policy using salt
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 14:49:51 -04:00
reyesj2 58ebbfba20 Add kafka state to standalone highstate
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 13:03:14 -04:00
reyesj2 e164d15ec6 Generate different Kafka certs for different SO nodetypes
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 13:02:47 -04:00
reyesj2 3efdb4e532 Reconfigure logstash Kafka input
- TODO: Configure what topics are pulled to searchnodes via the SOC UI

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 13:01:29 -04:00
reyesj2 de0af58cf8 Write out Kafka pillar path
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 10:45:46 -04:00
reyesj2 84abfa6881 Remove check for existing value since Kafka pillar is made empty on upgrade
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 10:45:05 -04:00
reyesj2 6b60e85a33 Make kafka configuration changes prior to 2.4.70 upgrade
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 10:15:26 -04:00
reyesj2 63f3e23e2b soup typo
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 09:54:19 -04:00
reyesj2 eb1249618b Update soup for Kafka
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 09:27:01 -04:00
reyesj2 cef9bb1487 Dynamically create Kafka topics based on event.module from elastic agent logs eg. zeek-topic. Depends on Kafka brokers having auto.create.topics.enable set to true
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-05-01 09:16:13 -04:00
reyesj2 bb49944b96 Setup elastic fleet rollover from logstash -> kafka output policy
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-30 16:47:40 -04:00
reyesj2 fcc4050f86 Add id to grid-kafka fleet output policy
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-30 12:59:53 -04:00
reyesj2 9c83a52c6d Add Kafka output to elastic-fleet setup. Includes separating topics by event.module with fallback to default-logs if no event.module is specified or doesn't match processors
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-30 12:01:31 -04:00
reyesj2 a6e8b25969 Add Kafka connectivity between manager - > receiver nodes.
Add connectivity to Kafka between other node types that may need to publish to Kafka.

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-29 15:48:57 -04:00
reyesj2 529bc01d69 Add missing configuration for nodes running Kafka broker role only
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-29 14:53:52 -04:00
reyesj2 11055b1d32 Rename kafkapass -> kafka_pass
Run so-kafka-clusterid within nodes.sls state so switchover is consistent

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-29 14:09:09 -04:00
reyesj2 fd9a91420d Use SOC UI to configure list of KRaft (Kafka) controllers for cluster
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-29 11:37:24 -04:00
reyesj2 529c8d7cf2 Remove salt reactor for Kafka
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-29 11:35:46 -04:00
reyesj2 086ebe1a7c Split kafka defaults between broker / controller
Setup config.map.jinja to update broker / controller / combined node types

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-29 09:08:14 -04:00
reyesj2 29c964cca1 Set kafka.nodes state to run first to populate kafka.nodes pillar
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-29 09:04:52 -04:00
reyesj2 36573d6005 Update kafka cert permissions
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-23 16:45:36 -04:00
reyesj2 aa0c589361 Update kafka managed node pillar template to include its process.role
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-23 13:51:12 -04:00
reyesj2 685b80e519 Merge remote-tracking branch 'remotes/origin/kaffytaffy' into reyesj2/kafka 2024-04-22 16:45:59 -04:00
reyesj2 5a401af1fd Update kafka process_x_roles annotation
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-22 16:44:35 -04:00
reyesj2 25d63f7516 Setup kafka reactor for managing kafka controllers globally
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-22 16:42:59 -04:00
m0duspwnens 6c5e0579cf logging changes. ensure salt master has pillarWatch engine 2024-04-19 09:32:32 -04:00
reyesj2 4ac04a1a46 add kafkapass soc annotation
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-18 16:46:36 -04:00
reyesj2 746128e37b update so-kafka-clusterid
This is a temporary script used to setup kafka secret and clusterid needed for kafka to start. This scripts functionality will be replaced by soup/setup scripts

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-18 15:13:29 -04:00
reyesj2 fe81ffaf78 Variables no longer used. Replaced by map file
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-18 15:11:22 -04:00
m0duspwnens 1f6eb9cdc3 match keys better. go through files reverse first found is prio 2024-04-18 13:50:37 -04:00
reyesj2 5cc358de4e Update map files to handle empty kafka:nodes pillar
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-18 11:58:25 -04:00
m0duspwnens 610dd2c08d improve it 2024-04-18 11:11:14 -04:00
m0duspwnens 506bbd314d more comments, better logging 2024-04-18 10:26:10 -04:00
m0duspwnens 4caa6a10b5 watch a pillar in files and take action 2024-04-17 18:09:04 -04:00
reyesj2 665b7197a6 Update Kafka nodeid
Update so-minion to include running kafka.nodes state to ensure nodeid is generated for new brokers

Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-17 17:08:41 -04:00
m0duspwnens 4b79623ce3 watch pillar files for changes and do something 2024-04-16 16:51:35 -04:00
m0duspwnens c4994a208b restart salt minion if a manager and signing policies change 2024-04-15 11:37:21 -04:00
reyesj2 eedea2ca88 Merge remote-tracking branch 'remotes/origin/kaffytaffy' into reyesj2/kafka 2024-04-12 16:24:33 -04:00
reyesj2 de6ea29e3b update default process.role to broker only
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 16:18:53 -04:00
m0duspwnens bb983d4ba2 just broker as default process 2024-04-12 16:16:03 -04:00
m0duspwnens c014508519 need /opt/so/conf/ca/cacerts on receiver for kafka to run 2024-04-12 13:50:25 -04:00
reyesj2 fcfbb1e857 Merge kaffytaffy
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 12:50:56 -04:00
reyesj2 911ee579a9 Typo
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 12:16:20 -04:00
reyesj2 a6ff92b099 Note to remove so-kafka-clusterid. Update soup and setup to generate needed kafka pillar values
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 12:11:18 -04:00
m0duspwnens d73ba7dd3e order kafka pillar assignment 2024-04-12 11:55:26 -04:00
m0duspwnens 04ddcd5c93 add receiver managersearch and standalone to kafka.nodes pillar 2024-04-12 11:52:57 -04:00
reyesj2 af29ae1968 Merge kaffytaffy
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 11:43:46 -04:00
reyesj2 fbd3cff90d Make global.pipeline use GLOBALMERGED value
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-12 11:21:19 -04:00
m0duspwnens 0ed9894b7e create kratos local pillar dirs during setup 2024-04-12 11:19:46 -04:00
m0duspwnens a54a72c269 move kafka_cluster_id to kafka:cluster_id 2024-04-12 11:19:20 -04:00
m0duspwnens f514e5e9bb add kafka to receiver 2024-04-11 16:23:05 -04:00
reyesj2 3955587372 Use global.pipeline for redis / kafka states
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-11 16:20:09 -04:00
reyesj2 6b28dc72e8 Update annotation for global.pipeline
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-11 15:38:33 -04:00
reyesj2 ca7253a589 Run kafka-clusterid script when pillar values are missing
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-11 15:38:03 -04:00
reyesj2 af53dcda1b Remove references to kafkanode
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-11 15:32:00 -04:00
m0duspwnens d3bd56b131 disable logstash and redis if kafka enabled 2024-04-10 14:13:27 -04:00
m0duspwnens e9e61ea2d8 Merge remote-tracking branch 'origin/2.4/dev' into kaffytaffy 2024-04-10 13:14:13 -04:00
m0duspwnens 86b984001d annotations and enable/disable from ui 2024-04-10 10:39:06 -04:00
m0duspwnens fa7f8104c8 Merge remote-tracking branch 'origin/reyesj2/kafka' into kaffytaffy 2024-04-09 11:13:02 -04:00
m0duspwnens bd5fe43285 jinja config files 2024-04-09 11:07:53 -04:00
m0duspwnens d38051e806 fix client and server properties formatting 2024-04-09 10:36:37 -04:00
m0duspwnens daa5342986 items not keys in for loop 2024-04-09 10:22:05 -04:00
m0duspwnens c48436ccbf fix dict update 2024-04-09 10:19:17 -04:00
m0duspwnens 7aa00faa6c fix var 2024-04-09 09:31:54 -04:00
m0duspwnens 6217a7b9a9 add defaults and jijafy kafka config 2024-04-09 09:27:21 -04:00
reyesj2 d67ebabc95 Remove logstash output to kafka pipeline. Add additional topics for searchnodes to ingest and add partition/offset info to event
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-08 16:38:03 -04:00
reyesj2 65274e89d7 Add client_id to logstash pipeline. To identify which searchnode is pulling messages
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-05 15:38:00 -04:00
reyesj2 721e04f793 initial logstash input from kafka over ssl
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-05 13:37:14 -04:00
reyesj2 433309ef1a Generate kafka cluster id if it doesn't exist
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-05 09:35:12 -04:00
reyesj2 735cfb4c29 Autogenerate kafka topics when a message it sent to non-existing topic
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-04 16:45:58 -04:00
reyesj2 6202090836 Merge remote-tracking branch 'origin/kaffytaffy' into reyesj2/kafka 2024-04-04 16:27:06 -04:00
reyesj2 436cbc1f06 Add kafka signing_policy for client/server auth. Add kafka-client cert on manager so manager can interact with kafka using its own cert
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-04 16:21:29 -04:00
reyesj2 40b08d737c Generate kafka keystore on changes to kafka.key
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-04 16:16:53 -04:00
m0duspwnens 4c5b42b898 restart container on server config changes 2024-04-04 15:47:01 -04:00
m0duspwnens 7a6b72ebac add so-kafka to manager for firewall 2024-04-04 15:46:11 -04:00
m0duspwnens 1b8584d4bb allow manager to manager on kafka ports 2024-04-03 15:36:35 -04:00
reyesj2 13105c4ab3 Generate certs for use with elasticfleet kafka output policy
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-03 14:34:07 -04:00
reyesj2 dc27bbb01d Set kafka heap size. To be later configured from SOC
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-03 14:30:52 -04:00
m0duspwnens b863060df1 kafka broker and listener on 0.0.0.0 2024-04-03 11:05:24 -04:00
m0duspwnens 18f95e867f port 9093 for kafka docker 2024-04-03 10:24:53 -04:00
m0duspwnens ed6137a76a allow sensor and searchnode to connect to manager kafka ports 2024-04-03 10:24:10 -04:00
m0duspwnens c3f02a698e add kafka nodes as extra hosts for the container 2024-04-03 10:23:36 -04:00
m0duspwnens db106f8ca1 listen on 0.0.0.0 for CONTROLLER 2024-04-03 10:22:47 -04:00
m0duspwnens 8e47cc73a5 kafka.nodes pillar to lf 2024-04-03 08:54:17 -04:00
m0duspwnens 639bf05081 add so-manager to kafka.nodes pillar 2024-04-03 08:52:26 -04:00
m0duspwnens 4e142e0212 put alphabetical 2024-04-02 16:47:35 -04:00
m0duspwnens c9bf1c86c6 Merge remote-tracking branch 'origin/reyesj2/kafka' into kaffytaffy 2024-04-02 16:40:47 -04:00
reyesj2 82830c8173 Fix typos and fix error related to elasticsearch saltstate being called from logstash state. Logstash will be removed from kafkanodes in future
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:37:39 -04:00
reyesj2 7f5741c43b Fix kafka storage setup
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:36:22 -04:00
reyesj2 643d4831c1 CRLF -> LF
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:35:14 -04:00
reyesj2 b032eed22a Update kafka to use manager docker registry
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:34:06 -04:00
reyesj2 1b49c8540e Fix kafka keystore script
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 16:32:15 -04:00
m0duspwnens f7534a0ae3 make manager download so-kafka container 2024-04-02 16:01:12 -04:00
m0duspwnens 780ad9eb10 add kafka to manager nodes 2024-04-02 15:50:25 -04:00
m0duspwnens e25bc8efe4 Merge remote-tracking branch 'origin/reyesj2/kafka' into kaffytaffy 2024-04-02 13:36:47 -04:00
reyesj2 26abe90671 Removed duplicate kafka setup
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-04-02 12:19:46 -04:00
reyesj2 446f1ffdf5 merge 2.4/dev
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2024-03-25 13:55:48 -04:00
reyesj2 8cf29682bb Update to merge in 2.4/dev
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-11-29 13:41:23 -05:00
reyesj2 86dc7cc804 Kafka init
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com>
2023-11-29 13:34:25 -05:00
91 changed files with 2458 additions and 195 deletions
+1 -1
View File
@@ -536,7 +536,7 @@ secretGroup = 4
[allowlist]
description = "global allow lists"
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''']
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''', '''ssl_.*password''']
paths = [
'''gitleaks.toml''',
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
+13 -11
View File
@@ -1,17 +1,17 @@
### 2.4.70-20240529 ISO image released on 2024/05/29
### 2.4.80-20240624 ISO image released on 2024/06/25
### Download and Verify
2.4.70-20240529 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
2.4.80-20240624 ISO image:
https://download.securityonion.net/file/securityonion/securityonion-2.4.80-20240624.iso
MD5: 8FCCF31C2470D1ABA380AF196B611DEC
SHA1: EE5E8F8C14819E7A1FE423E6920531A97F39600B
SHA256: EF5E781D50D50660F452ADC54FD4911296ECBECED7879FA8E04687337CA89BEC
MD5: 139F9762E926F9CB3C4A9528A3752C31
SHA1: BC6CA2C5F4ABC1A04E83A5CF8FFA6A53B1583CC9
SHA256: 70E90845C84FFA30AD6CF21504634F57C273E7996CA72F7250428DDBAAC5B1BD
Signature for ISO image:
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.80-20240624.iso.sig
Signing key:
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
@@ -25,27 +25,29 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.
Download the signature file for the ISO:
```
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.70-20240529.iso.sig
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.80-20240624.iso.sig
```
Download the ISO image:
```
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.70-20240529.iso
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.80-20240624.iso
```
Verify the downloaded ISO image using the signature file:
```
gpg --verify securityonion-2.4.70-20240529.iso.sig securityonion-2.4.70-20240529.iso
gpg --verify securityonion-2.4.80-20240624.iso.sig securityonion-2.4.80-20240624.iso
```
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
```
gpg: Signature made Wed 29 May 2024 11:40:59 AM EDT using RSA key ID FE507013
gpg: Signature made Mon 24 Jun 2024 02:42:03 PM EDT using RSA key ID FE507013
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
gpg: WARNING: This key is not certified with a trusted signature!
gpg: There is no indication that the signature belongs to the owner.
Primary key fingerprint: C804 A93D 36BE 0C73 3EA1 9644 7C10 60B7 FE50 7013
```
If it fails to verify, try downloading again. If it still fails to verify, try downloading from another computer or another network.
Once you've verified the ISO image, you're ready to proceed to our Installation guide:
https://docs.securityonion.net/en/2.4/installation.html
+1 -1
View File
@@ -1 +1 @@
2.4.70
2.4.80
@@ -19,4 +19,4 @@ role:
receiver:
standalone:
searchnode:
sensor:
sensor:
+2
View File
@@ -0,0 +1,2 @@
kafka:
nodes:
+11
View File
@@ -61,6 +61,9 @@ base:
- backup.adv_backup
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
- stig.soc_stig
'*_sensor':
@@ -176,6 +179,9 @@ base:
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- stig.soc_stig
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
'*_heavynode':
- elasticsearch.auth
@@ -220,6 +226,7 @@ base:
- minions.adv_{{ grains.id }}
- stig.soc_stig
- soc.license
- kafka.nodes
'*_receiver':
- logstash.nodes
@@ -232,6 +239,10 @@ base:
- redis.adv_redis
- minions.{{ grains.id }}
- minions.adv_{{ grains.id }}
- kafka.nodes
- kafka.soc_kafka
- kafka.adv_kafka
- soc.license
'*_import':
- secrets
+9 -5
View File
@@ -15,12 +15,16 @@ TARGET_DIR=${1:-.}
PATH=$PATH:/usr/local/bin
if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then
echo "Missing dependencies. Consider running the following command:"
echo " python -m pip install flake8 pytest pytest-cov"
if [ ! -d .venv ]; then
python -m venv .venv
fi
source .venv/bin/activate
if ! pip install flake8 pytest pytest-cov pyyaml; then
echo "Unable to install dependencies."
exit 1
fi
pip install pytest pytest-cov
flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR"
+10 -4
View File
@@ -103,7 +103,8 @@
'utility',
'schedule',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-managersearch': [
'salt.master',
@@ -125,7 +126,8 @@
'utility',
'schedule',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-searchnode': [
'ssl',
@@ -159,7 +161,8 @@
'schedule',
'tcpreplay',
'docker_clean',
'stig'
'stig',
'kafka'
],
'so-sensor': [
'ssl',
@@ -190,7 +193,10 @@
'telegraf',
'firewall',
'schedule',
'docker_clean'
'docker_clean',
'kafka',
'elasticsearch.ca',
'stig'
],
'so-desktop': [
'ssl',
+14 -3
View File
@@ -1,6 +1,3 @@
mine_functions:
x509.get_pem_entries: [/etc/pki/ca.crt]
x509_signing_policies:
filebeat:
- minions: '*'
@@ -70,3 +67,17 @@ x509_signing_policies:
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 820
- copypath: /etc/pki/issued_certs/
kafka:
- minions: '*'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "digitalSignature, keyEncipherment"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- extendedKeyUsage: "serverAuth, clientAuth"
- days_valid: 820
- copypath: /etc/pki/issued_certs/
+13
View File
@@ -57,6 +57,12 @@ copy_so-yaml_manager_tools_sbin:
- force: True
- preserve: True
copy_so-repo-sync_manager_tools_sbin:
file.copy:
- name: /opt/so/saltstack/default/salt/manager/tools/sbin/so-repo-sync
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
- preserve: True
# This section is used to put the new script in place so that it can be called during soup.
# It is faster than calling the states that normally manage them to put them in place.
copy_so-common_sbin:
@@ -94,6 +100,13 @@ copy_so-yaml_sbin:
- force: True
- preserve: True
copy_so-repo-sync_sbin:
file.copy:
- name: /usr/sbin/so-repo-sync
- source: {{UPDATE_DIR}}/salt/manager/tools/sbin/so-repo-sync
- force: True
- preserve: True
{% else %}
fix_23_soup_sbin:
cmd.run:
+5
View File
@@ -31,6 +31,11 @@ if ! echo "$PATH" | grep -q "/usr/sbin"; then
export PATH="$PATH:/usr/sbin"
fi
# See if a proxy is set. If so use it.
if [ -f /etc/profile.d/so-proxy.sh ]; then
. /etc/profile.d/so-proxy.sh
fi
# Define a banner to separate sections
banner="========================================================================="
+2 -1
View File
@@ -50,6 +50,7 @@ container_list() {
"so-idh"
"so-idstools"
"so-influxdb"
"so-kafka"
"so-kibana"
"so-kratos"
"so-logstash"
@@ -64,7 +65,7 @@ container_list() {
"so-strelka-manager"
"so-suricata"
"so-telegraf"
"so-zeek"
"so-zeek"
)
else
TRUSTED_CONTAINERS=(
@@ -10,7 +10,7 @@
. /usr/sbin/so-common
. /usr/sbin/so-image-common
REPLAYIFACE=${REPLAYIFACE:-$(lookup_pillar interface sensor)}
REPLAYIFACE=${REPLAYIFACE:-"{{salt['pillar.get']('sensor:interface', '')}}"}
REPLAYSPEED=${REPLAYSPEED:-10}
mkdir -p /opt/so/samples
+9
View File
@@ -187,3 +187,12 @@ docker:
custom_bind_mounts: []
extra_hosts: []
extra_env: []
'so-kafka':
final_octet: 88
port_bindings:
- 0.0.0.0:9092:9092
- 0.0.0.0:9093:9093
- 0.0.0.0:8778:8778
custom_bind_mounts: []
extra_hosts: []
extra_env: []
+16 -16
View File
@@ -20,30 +20,30 @@ dockergroup:
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.6.21-1
- docker-ce: 5:24.0.3-1~debian.12~bookworm
- docker-ce-cli: 5:24.0.3-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:24.0.3-1~debian.12~bookworm
- containerd.io: 1.6.33-1
- docker-ce: 5:26.1.4-1~debian.12~bookworm
- docker-ce-cli: 5:26.1.4-1~debian.12~bookworm
- docker-ce-rootless-extras: 5:26.1.4-1~debian.12~bookworm
- hold: True
- update_holds: True
{% elif grains.oscodename == 'jammy' %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.6.21-1
- docker-ce: 5:24.0.2-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:24.0.2-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:24.0.2-1~ubuntu.22.04~jammy
- containerd.io: 1.6.33-1
- docker-ce: 5:26.1.4-1~ubuntu.22.04~jammy
- docker-ce-cli: 5:26.1.4-1~ubuntu.22.04~jammy
- docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.22.04~jammy
- hold: True
- update_holds: True
{% else %}
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.4.9-1
- docker-ce: 5:20.10.8~3-0~ubuntu-focal
- docker-ce-cli: 5:20.10.5~3-0~ubuntu-focal
- docker-ce-rootless-extras: 5:20.10.5~3-0~ubuntu-focal
- containerd.io: 1.6.33-1
- docker-ce: 5:26.1.4-1~ubuntu.20.04~focal
- docker-ce-cli: 5:26.1.4-1~ubuntu.20.04~focal
- docker-ce-rootless-extras: 5:26.1.4-1~ubuntu.20.04~focal
- hold: True
- update_holds: True
{% endif %}
@@ -51,10 +51,10 @@ dockerheldpackages:
dockerheldpackages:
pkg.installed:
- pkgs:
- containerd.io: 1.6.21-3.1.el9
- docker-ce: 24.0.4-1.el9
- docker-ce-cli: 24.0.4-1.el9
- docker-ce-rootless-extras: 24.0.4-1.el9
- containerd.io: 1.6.33-3.1.el9
- docker-ce: 3:26.1.4-1.el9
- docker-ce-cli: 1:26.1.4-1.el9
- docker-ce-rootless-extras: 26.1.4-1.el9
- hold: True
- update_holds: True
{% endif %}
+1
View File
@@ -101,3 +101,4 @@ docker:
multiline: True
forcedType: "[]string"
so-zeek: *dockerOptions
so-kafka: *dockerOptions
+12 -4
View File
@@ -27,7 +27,9 @@ wait_for_elasticsearch_elasticfleet:
so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update
- retry: True
- retry:
attempts: 4
interval: 30
{% endif %}
# If enabled, automatically update Fleet Server URLs & ES Connection
@@ -35,7 +37,9 @@ so-elastic-fleet-auto-configure-logstash-outputs:
so-elastic-fleet-auto-configure-server-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-urls-update
- retry: True
- retry:
attempts: 4
interval: 30
{% endif %}
# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
@@ -43,12 +47,16 @@ so-elastic-fleet-auto-configure-server-urls:
so-elastic-fleet-auto-configure-elasticsearch-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-es-url-update
- retry: True
- retry:
attempts: 4
interval: 30
so-elastic-fleet-auto-configure-artifact-urls:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-artifacts-url-update
- retry: True
- retry:
attempts: 4
interval: 30
{% endif %}
@@ -19,7 +19,7 @@ NUM_RUNNING=$(pgrep -cf "/bin/bash /sbin/so-elastic-agent-gen-installers")
for i in {1..30}
do
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys?perPage=100" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
done
@@ -21,64 +21,104 @@ function update_logstash_outputs() {
# Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
function update_kafka_outputs() {
# Make sure SSL configuration is included in policy updates for Kafka output. SSL is configured in so-elastic-fleet-setup
SSL_CONFIG=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" | jq -r '.item.ssl')
# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
JSON_STRING=$(jq -n \
--arg UPDATEDLIST "$NEW_LIST_JSON" \
--argjson SSL_CONFIG "$SSL_CONFIG" \
'{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}')
# Update Kafka outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
printf "Failed to query for current Logstash Outputs..."
exit 1
fi
{% if GLOBALS.pipeline == "KAFKA" %}
# Get current list of Kafka Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_kafka')
# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_kafka" ]; then
printf "Failed to query for current Kafka Outputs..."
exit 1
fi
declare -a NEW_LIST=()
# Get the current list of kafka outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=()
# Query for the current Grid Nodes that are running kafka
KAFKANODES=$(salt-call --out=json pillar.get kafka:nodes | jq '.local')
# Query for Kafka nodes with Broker role and add hostname to list
while IFS= read -r line; do
NEW_LIST+=("$line")
done < <(jq -r 'to_entries | .[] | select(.value.role | contains("broker")) | .key + ":9092"' <<< $KAFKANODES)
{# If global pipeline isn't set to KAFKA then assume default of REDIS / logstash #}
{% else %}
# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
printf "Failed to query for current Logstash Outputs..."
exit 1
fi
# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
declare -a NEW_LIST=()
{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
{% if ELASTICFLEETMERGED.enable_manager_output %}
# Create array & add initial elements
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
else
NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
fi
{% endif %}
# Query for FQDN entries & add them to the list
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
for CUSTOMNAME in "${CUSTOMFQDN[@]}"
do
NEW_LIST+=("$CUSTOMNAME:5055")
done
{% endif %}
# Query for the current Grid Nodes that are running Logstash
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
# Query for Receiver Nodes & add them to the list
if grep -q "receiver" <<< $LOGSTASHNODES; then
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${RECEIVERNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
# Query for Fleet Nodes & add them to the list
if grep -q "fleet" <<< $LOGSTASHNODES; then
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${FLEETNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
{# If we select to not send to manager via SOC, then omit the code that adds manager to NEW_LIST #}
{% if ELASTICFLEETMERGED.enable_manager_output %}
# Create array & add initial elements
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
NEW_LIST+=("{{ GLOBALS.url_base }}:5055")
else
NEW_LIST+=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.hostname }}:5055")
fi
{% endif %}
# Query for FQDN entries & add them to the list
{% if ELASTICFLEETMERGED.config.server.custom_fqdn | length > 0 %}
CUSTOMFQDNLIST=('{{ ELASTICFLEETMERGED.config.server.custom_fqdn | join(' ') }}')
readarray -t -d ' ' CUSTOMFQDN < <(printf '%s' "$CUSTOMFQDNLIST")
for CUSTOMNAME in "${CUSTOMFQDN[@]}"
do
NEW_LIST+=("$CUSTOMNAME:5055")
done
{% endif %}
# Query for the current Grid Nodes that are running Logstash
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
# Query for Receiver Nodes & add them to the list
if grep -q "receiver" <<< $LOGSTASHNODES; then
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${RECEIVERNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
# Query for Fleet Nodes & add them to the list
if grep -q "fleet" <<< $LOGSTASHNODES; then
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${FLEETNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
# Sort & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
@@ -87,9 +127,28 @@ NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
printf "\nHashes match - no update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
# Since output can be KAFKA or LOGSTASH, we need to check if the policy set as default matches the value set in GLOBALS.pipeline and update if needed
printf "Checking if the correct output policy is set as default\n"
OUTPUT_DEFAULT=$(jq -r '.item.is_default' <<< $RAW_JSON)
OUTPUT_DEFAULT_MONITORING=$(jq -r '.item.is_default_monitoring' <<< $RAW_JSON)
if [[ "$OUTPUT_DEFAULT" = "false" || "$OUTPUT_DEFAULT_MONITORING" = "false" ]]; then
printf "Default output policy needs to be updated.\n"
{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
update_kafka_outputs
{%- else %}
update_logstash_outputs
{%- endif %}
else
printf "Default output policy is set - no update needed.\n"
fi
exit 0
else
printf "\nHashes don't match - update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
{%- if GLOBALS.pipeline == "KAFKA" and 'gmd' in salt['pillar.get']('features', []) %}
update_kafka_outputs
{%- else %}
update_logstash_outputs
{%- endif %}
fi
@@ -77,6 +77,11 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
printf "\n\n"
{%- endif %}
printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
/usr/sbin/so-kafka-fleet-output-policy
{% endif %}
# Add Manager Hostname & URL Base to Fleet Host URLs
printf "\nAdd SO-Manager Fleet URL\n"
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
@@ -0,0 +1,52 @@
#!/bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% if GLOBALS.role in ['so-manager', 'so-standalone', 'so-managersearch'] %}
. /usr/sbin/so-common
# Check to make sure that Kibana API is up & ready
RETURN_CODE=0
wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"
RETURN_CODE=$?
if [[ "$RETURN_CODE" != "0" ]]; then
printf "Kibana API not accessible, can't setup Elastic Fleet output policy for Kafka..."
exit 1
fi
output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
if ! echo "$output" | grep -q "so-manager_kafka"; then
KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt)
KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key)
KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
KAFKA_OUTPUT_VERSION="2.6.0"
JSON_STRING=$( jq -n \
--arg KAFKACRT "$KAFKACRT" \
--arg KAFKAKEY "$KAFKAKEY" \
--arg KAFKACA "$KAFKACA" \
--arg MANAGER_IP "{{ GLOBALS.manager_ip }}:9092" \
--arg KAFKA_OUTPUT_VERSION "$KAFKA_OUTPUT_VERSION" \
'{ "name": "grid-kafka", "id": "so-manager_kafka", "type": "kafka", "hosts": [ $MANAGER_IP ], "is_default": false, "is_default_monitoring": false, "config_yaml": "", "ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }, "proxy_id": null, "client_id": "Elastic", "version": $KAFKA_OUTPUT_VERSION, "compression": "none", "auth_type": "ssl", "partition": "round_robin", "round_robin": { "group_events": 1 }, "topics":[{"topic":"%{[event.module]}-securityonion","when":{"type":"regexp","condition":"event.module:.+"}},{"topic":"default-securityonion"}], "headers": [ { "key": "", "value": "" } ], "timeout": 30, "broker_timeout": 30, "required_acks": 1 }'
)
curl -sK /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" -o /dev/null
refresh_output=$(curl -sK /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs" | jq -r .items[].id)
if ! echo "$refresh_output" | grep -q "so-manager_kafka"; then
echo -e "\nFailed to setup Elastic Fleet output policy for Kafka...\n"
exit 1
elif echo "$refresh_output" | grep -q "so-manager_kafka"; then
echo -e "\nSuccessfully setup Elastic Fleet output policy for Kafka...\n"
fi
elif echo "$output" | grep -q "so-manager_kafka"; then
echo -e "\nElastic Fleet output policy for Kafka already exists...\n"
fi
{% else %}
echo -e "\nNo update required...\n"
{% endif %}
+1 -1
View File
@@ -4,7 +4,7 @@
# Elastic License 2.0.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% if sls.split('.')[0] in allowed_states or sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
# Move our new CA over so Elastic and Logstash can use SSL with the internal CA
+183
View File
@@ -170,6 +170,78 @@ elasticsearch:
set_priority:
priority: 50
min_age: 30d
so-items:
index_sorting: false
index_template:
composed_of:
- so-items-mappings
index_patterns:
- .items-default-**
priority: 500
template:
mappings:
date_detection: false
settings:
index:
lifecycle:
name: so-items-logs
rollover_alias: ".items-default"
routing:
allocation:
include:
_tier_preference: "data_content"
mapping:
total_fields:
limit: 10000
number_of_replicas: 0
number_of_shards: 1
refresh_interval: 30s
sort:
field: '@timestamp'
order: desc
policy:
phases:
hot:
actions:
rollover:
max_size: 50gb
min_age: 0ms
so-lists:
index_sorting: false
index_template:
composed_of:
- so-lists-mappings
index_patterns:
- .lists-default-**
priority: 500
template:
mappings:
date_detection: false
settings:
index:
lifecycle:
name: so-lists-logs
rollover_alias: ".lists-default"
routing:
allocation:
include:
_tier_preference: "data_content"
mapping:
total_fields:
limit: 10000
number_of_replicas: 0
number_of_shards: 1
refresh_interval: 30s
sort:
field: '@timestamp'
order: desc
policy:
phases:
hot:
actions:
rollover:
max_size: 50gb
min_age: 0ms
so-case:
index_sorting: false
index_template:
@@ -11088,6 +11160,117 @@ elasticsearch:
set_priority:
priority: 50
min_age: 30d
so-suricata_x_alerts:
index_sorting: false
index_template:
composed_of:
- agent-mappings
- dtc-agent-mappings
- base-mappings
- dtc-base-mappings
- client-mappings
- dtc-client-mappings
- cloud-mappings
- container-mappings
- data_stream-mappings
- destination-mappings
- dtc-destination-mappings
- pb-override-destination-mappings
- dll-mappings
- dns-mappings
- dtc-dns-mappings
- ecs-mappings
- dtc-ecs-mappings
- error-mappings
- event-mappings
- dtc-event-mappings
- file-mappings
- dtc-file-mappings
- group-mappings
- host-mappings
- dtc-host-mappings
- http-mappings
- dtc-http-mappings
- log-mappings
- network-mappings
- dtc-network-mappings
- observer-mappings
- dtc-observer-mappings
- orchestrator-mappings
- organization-mappings
- package-mappings
- process-mappings
- dtc-process-mappings
- registry-mappings
- related-mappings
- rule-mappings
- dtc-rule-mappings
- server-mappings
- service-mappings
- dtc-service-mappings
- source-mappings
- dtc-source-mappings
- pb-override-source-mappings
- suricata-mappings
- threat-mappings
- tls-mappings
- tracing-mappings
- url-mappings
- user_agent-mappings
- dtc-user_agent-mappings
- vulnerability-mappings
- common-settings
- common-dynamic-mappings
data_stream: {}
index_patterns:
- logs-suricata.alerts-*
priority: 500
template:
mappings:
date_detection: false
dynamic_templates:
- strings_as_keyword:
mapping:
ignore_above: 1024
type: keyword
match_mapping_type: string
settings:
index:
lifecycle:
name: so-suricata.alerts-logs
mapping:
total_fields:
limit: 5000
number_of_replicas: 0
number_of_shards: 1
refresh_interval: 30s
sort:
field: '@timestamp'
order: desc
policy:
phases:
cold:
actions:
set_priority:
priority: 0
min_age: 60d
delete:
actions:
delete: {}
min_age: 365d
hot:
actions:
rollover:
max_age: 1d
max_primary_shard_size: 50gb
set_priority:
priority: 100
min_age: 0ms
warm:
actions:
set_priority:
priority: 50
min_age: 30d
so-syslog:
index_sorting: false
index_template:
@@ -84,6 +84,7 @@
{ "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } },
{ "set": { "if": "ctx.event?.module == 'fim'", "override": true, "field": "event.module", "value": "file_integrity" } },
{ "rename": { "if": "ctx.winlog?.provider_name == 'Microsoft-Windows-Windows Defender'", "ignore_missing": true, "field": "winlog.event_data.Threat Name", "target_field": "winlog.event_data.threat_name" } },
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } }
],
"on_failure": [
@@ -1,6 +1,7 @@
{
"description" : "suricata.alert",
"processors" : [
{ "set": { "field": "_index", "value": "logs-suricata.alerts-so" } },
{ "set": { "field": "tags","value": "alert" }},
{ "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } },
{ "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } },
@@ -521,6 +521,7 @@ elasticsearch:
so-endgame: *indexSettings
so-idh: *indexSettings
so-suricata: *indexSettings
so-suricata_x_alerts: *indexSettings
so-import: *indexSettings
so-kratos: *indexSettings
so-kismet: *indexSettings
@@ -0,0 +1,112 @@
{
"template": {
"mappings": {
"dynamic": "strict",
"properties": {
"binary": {
"type": "binary"
},
"boolean": {
"type": "boolean"
},
"byte": {
"type": "byte"
},
"created_at": {
"type": "date"
},
"created_by": {
"type": "keyword"
},
"date": {
"type": "date"
},
"date_nanos": {
"type": "date_nanos"
},
"date_range": {
"type": "date_range"
},
"deserializer": {
"type": "keyword"
},
"double": {
"type": "double"
},
"double_range": {
"type": "double_range"
},
"float": {
"type": "float"
},
"float_range": {
"type": "float_range"
},
"geo_point": {
"type": "geo_point"
},
"geo_shape": {
"type": "geo_shape"
},
"half_float": {
"type": "half_float"
},
"integer": {
"type": "integer"
},
"integer_range": {
"type": "integer_range"
},
"ip": {
"type": "ip"
},
"ip_range": {
"type": "ip_range"
},
"keyword": {
"type": "keyword"
},
"list_id": {
"type": "keyword"
},
"long": {
"type": "long"
},
"long_range": {
"type": "long_range"
},
"meta": {
"type": "object",
"enabled": false
},
"serializer": {
"type": "keyword"
},
"shape": {
"type": "shape"
},
"short": {
"type": "short"
},
"text": {
"type": "text"
},
"tie_breaker_id": {
"type": "keyword"
},
"updated_at": {
"type": "date"
},
"updated_by": {
"type": "keyword"
}
}
},
"aliases": {}
},
"version": 2,
"_meta": {
"managed": true,
"description": "default mappings for the .items index template installed by Kibana/Security"
}
}
@@ -0,0 +1,55 @@
{
"template": {
"mappings": {
"dynamic": "strict",
"properties": {
"created_at": {
"type": "date"
},
"created_by": {
"type": "keyword"
},
"description": {
"type": "keyword"
},
"deserializer": {
"type": "keyword"
},
"immutable": {
"type": "boolean"
},
"meta": {
"type": "object",
"enabled": false
},
"name": {
"type": "keyword"
},
"serializer": {
"type": "keyword"
},
"tie_breaker_id": {
"type": "keyword"
},
"type": {
"type": "keyword"
},
"updated_at": {
"type": "date"
},
"updated_by": {
"type": "keyword"
},
"version": {
"type": "keyword"
}
}
},
"aliases": {}
},
"version": 2,
"_meta": {
"managed": true,
"description": "default mappings for the .lists index template installed by Kibana/Security"
}
}
+2
View File
@@ -27,6 +27,7 @@
'so-elastic-fleet',
'so-elastic-fleet-package-registry',
'so-influxdb',
'so-kafka',
'so-kibana',
'so-kratos',
'so-logstash',
@@ -80,6 +81,7 @@
{% set NODE_CONTAINERS = [
'so-logstash',
'so-redis',
'so-kafka'
] %}
{% elif GLOBALS.role == 'so-idh' %}
+31 -8
View File
@@ -90,12 +90,20 @@ firewall:
tcp:
- 8086
udp: []
kafka_controller:
tcp:
- 9093
udp: []
kafka_data:
tcp:
- 9092
udp: []
kibana:
tcp:
- 5601
udp: []
localrules:
tcp:
tcp:
- 7788
udp: []
nginx:
@@ -753,7 +761,6 @@ firewall:
- beats_5044
- beats_5644
- beats_5056
- redis
- elasticsearch_node
- elastic_agent_control
- elastic_agent_data
@@ -1267,35 +1274,51 @@ firewall:
chain:
DOCKER-USER:
hostgroups:
desktop:
portgroups:
- elastic_agent_data
fleet:
portgroups:
- beats_5056
- elastic_agent_data
idh:
portgroups:
- elastic_agent_data
sensor:
portgroups:
- beats_5044
- beats_5644
- elastic_agent_data
searchnode:
portgroups:
- redis
- beats_5644
- elastic_agent_data
standalone:
portgroups:
- redis
- elastic_agent_data
manager:
portgroups:
- elastic_agent_data
managersearch:
portgroups:
- redis
- beats_5644
- elastic_agent_data
self:
portgroups:
- redis
- beats_5644
- elastic_agent_data
beats_endpoint:
portgroups:
- beats_5044
beats_endpoint_ssl:
portgroups:
- beats_5644
elastic_agent_endpoint:
portgroups:
- elastic_agent_data
endgame:
portgroups:
- endgame
receiver:
portgroups: []
customhostgroup0:
portgroups: []
customhostgroup1:
+24
View File
@@ -18,4 +18,28 @@
{% endfor %}
{% endif %}
{# Only add Kafka firewall items when Kafka enabled #}
{% set role = GLOBALS.role.split('-')[1] %}
{% if GLOBALS.pipeline == 'KAFKA' and role in ['manager', 'managersearch', 'standalone'] %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[role].portgroups.append('kafka_controller') %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
{% endif %}
{% if GLOBALS.pipeline == 'KAFKA' and role == 'receiver' %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.self.portgroups.append('kafka_controller') %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.standalone.portgroups.append('kafka_controller') %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.manager.portgroups.append('kafka_controller') %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.managersearch.portgroups.append('kafka_controller') %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups.receiver.portgroups.append('kafka_controller') %}
{% endif %}
{% if GLOBALS.pipeline == 'KAFKA' and role in ['manager', 'managersearch', 'standalone', 'receiver'] %}
{% for r in ['manager', 'managersearch', 'standalone', 'receiver', 'fleet', 'idh', 'sensor', 'searchnode','heavynode', 'elastic_agent_endpoint', 'desktop'] %}
{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('kafka_data') %}
{% endif %}
{% endfor %}
{% endif %}
{% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
+3 -1
View File
@@ -120,6 +120,9 @@ firewall:
influxdb:
tcp: *tcpsettings
udp: *udpsettings
kafka:
tcp: *tcpsettings
udp: *udpsettings
kibana:
tcp: *tcpsettings
udp: *udpsettings
@@ -939,7 +942,6 @@ firewall:
portgroups: *portgroupshost
customhostgroup9:
portgroups: *portgroupshost
idh:
chain:
DOCKER-USER:
+2 -1
View File
@@ -1,2 +1,3 @@
global:
pcapengine: STENO
pcapengine: STENO
pipeline: REDIS
+3 -2
View File
@@ -36,9 +36,10 @@ global:
global: True
advanced: True
pipeline:
description: Sets which pipeline technology for events to use. Currently only Redis is supported.
description: Sets which pipeline technology for events to use. Currently only Redis is fully supported. Kafka is experimental and requires a Security Onion Pro license.
regex: ^(REDIS|KAFKA)$
regexFailureMessage: You must enter either REDIS or KAFKA.
global: True
readonly: True
advanced: True
repo_host:
description: Specify the host where operating system packages will be served from.
+13
View File
@@ -33,6 +33,19 @@ idstools_sbin_jinja:
- file_mode: 755
- template: jinja
suricatacustomdirsfile:
file.directory:
- name: /nsm/rules/detect-suricata/custom_file
- user: 939
- group: 939
- makedirs: True
suricatacustomdirsurl:
file.directory:
- name: /nsm/rules/detect-suricata/custom_temp
- user: 939
- group: 939
{% else %}
{{sls}}_state_not_allowed:
+12 -6
View File
@@ -1,6 +1,8 @@
{%- from 'vars/globals.map.jinja' import GLOBALS -%}
{%- from 'idstools/map.jinja' import IDSTOOLSMERGED -%}
{%- from 'soc/merged.map.jinja' import SOCMERGED -%}
--suricata-version=6.0
--merged=/opt/so/rules/nids/suri/all.rules
--output=/nsm/rules/detect-suricata/custom_temp
--local=/opt/so/rules/nids/suri/local.rules
{%- if GLOBALS.md_engine == "SURICATA" %}
--local=/opt/so/rules/nids/suri/extraction.rules
@@ -10,8 +12,12 @@
--disable=/opt/so/idstools/etc/disable.conf
--enable=/opt/so/idstools/etc/enable.conf
--modify=/opt/so/idstools/etc/modify.conf
{%- if IDSTOOLSMERGED.config.urls | length > 0 %}
{%- for URL in IDSTOOLSMERGED.config.urls %}
--url={{ URL }}
{%- endfor %}
{%- endif %}
{%- if SOCMERGED.config.server.modules.suricataengine.customRulesets %}
{%- for ruleset in SOCMERGED.config.server.modules.suricataengine.customRulesets %}
{%- if 'url' in ruleset %}
--url={{ ruleset.url }}
{%- elif 'file' in ruleset %}
--local={{ ruleset.file }}
{%- endif %}
{%- endfor %}
{%- endif %}
@@ -26,8 +26,6 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force
{%- elif IDSTOOLSMERGED.config.ruleset == 'ETPRO' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --etpro={{ IDSTOOLSMERGED.config.oinkcode }}
{%- elif IDSTOOLSMERGED.config.ruleset == 'TALOS' %}
docker exec so-idstools idstools-rulecat -v --suricata-version 6.0 -o /nsm/rules/suricata/ --merged=/nsm/rules/suricata/emerging-all.rules --force --url=https://www.snort.org/rules/snortrules-snapshot-2983.tar.gz?oinkcode={{ IDSTOOLSMERGED.config.oinkcode }}
{%- endif %}
{%- endif %}
File diff suppressed because one or more lines are too long
+91
View File
@@ -0,0 +1,91 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{# Builds KAFKACONFIG / KAFKACLIENT dicts used to render server.properties and
client.properties. Keys use '_x_' in place of '.'; the property templates
translate them back via replace("_x_", "."). #}
{% from 'kafka/map.jinja' import KAFKAMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set KAFKA_NODES_PILLAR = salt['pillar.get']('kafka:nodes') %}
{% set KAFKA_PASSWORD = salt['pillar.get']('kafka:password') %}
{# Create list of KRaft controllers #}
{% set controllers = [] %}
{# Check for Kafka nodes with controller in process_x_roles #}
{% for node in KAFKA_NODES_PILLAR %}
{% if 'controller' in KAFKA_NODES_PILLAR[node].role %}
{# quorum voter format: <nodeid>@<hostname>:9093 #}
{% do controllers.append(KAFKA_NODES_PILLAR[node].nodeid ~ "@" ~ node ~ ":9093") %}
{% endif %}
{% endfor %}
{% set kafka_controller_quorum_voters = ','.join(controllers) %}
{# By default all Kafka eligible nodes are given the role of broker, except for
grid MANAGER (broker,controller) until overridden through SOC UI #}
{% set node_type = salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname + ':role') %}
{# Generate server.properties for 'broker' , 'controller', 'broker,controller' node types
anything above this line is a configuration needed for ALL Kafka nodes #}
{% if node_type == 'broker' %}
{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %}
{% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
{% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
{% do KAFKAMERGED.config.broker.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}
{# Nodes with only the 'broker' role need to have the below settings for communicating with controller nodes #}
{% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %}
{% do KAFKAMERGED.config.broker.update({
    'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map
    + ',' + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map })
%}
{% endif %}
{% if node_type == 'controller' %}
{% do KAFKAMERGED.config.controller.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
{% do KAFKAMERGED.config.controller.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
{% do KAFKAMERGED.config.controller.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}
{% endif %}
{# Kafka nodes of this type are not recommended for use outside of development / testing. #}
{% if node_type == 'broker,controller' %}
{% do KAFKAMERGED.config.broker.update({'advertised_x_listeners': 'BROKER://'+ GLOBALS.node_ip +':9092' }) %}
{% do KAFKAMERGED.config.broker.update({'controller_x_listener_x_names': KAFKAMERGED.config.controller.controller_x_listener_x_names }) %}
{% do KAFKAMERGED.config.broker.update({'controller_x_quorum_x_voters': kafka_controller_quorum_voters }) %}
{% do KAFKAMERGED.config.broker.update({'node_x_id': salt['pillar.get']('kafka:nodes:'+ GLOBALS.hostname +':nodeid') }) %}
{% do KAFKAMERGED.config.broker.update({'process_x_roles': 'broker,controller' }) %}
{% do KAFKAMERGED.config.broker.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}
{# combined node listens on both BROKER (9092) and CONTROLLER (9093) listeners #}
{% do KAFKAMERGED.config.broker.update({
    'listeners': KAFKAMERGED.config.broker.listeners + ',' + KAFKAMERGED.config.controller.listeners })
%}
{% do KAFKAMERGED.config.broker.update({
    'listener_x_security_x_protocol_x_map': KAFKAMERGED.config.broker.listener_x_security_x_protocol_x_map
    + ',' + KAFKAMERGED.config.controller.listener_x_security_x_protocol_x_map })
%}
{% endif %}
{# If a password other than PLACEHOLDER isn't set remove it from the server.properties #}
{% if KAFKAMERGED.config.broker.ssl_x_truststore_x_password == 'PLACEHOLDER' %}
{% do KAFKAMERGED.config.broker.pop('ssl_x_truststore_x_password') %}
{% endif %}
{% if KAFKAMERGED.config.controller.ssl_x_truststore_x_password == 'PLACEHOLDER' %}
{% do KAFKAMERGED.config.controller.pop('ssl_x_truststore_x_password') %}
{% endif %}
{# Client properties stuff #}
{% if KAFKAMERGED.config.client.ssl_x_truststore_x_password == 'PLACEHOLDER' %}
{% do KAFKAMERGED.config.client.pop('ssl_x_truststore_x_password') %}
{% endif %}
{% do KAFKAMERGED.config.client.update({'ssl_x_keystore_x_password': KAFKA_PASSWORD }) %}
{# Export: any node whose role includes 'broker' renders the (possibly merged)
broker config; pure controllers render the controller config. #}
{% if 'broker' in node_type %}
{% set KAFKACONFIG = KAFKAMERGED.config.broker %}
{% else %}
{% set KAFKACONFIG = KAFKAMERGED.config.controller %}
{% endif %}
{% set KAFKACLIENT = KAFKAMERGED.config.client %}
+69
View File
@@ -0,0 +1,69 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# Provisions the kafka user/group, directories, helper scripts, and renders
# the KRaft server/client property files from the kafka config map.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
- ssl
# uid/gid 960 is the reserved kafka account; file ownership below matches it.
kafka_group:
group.present:
- name: kafka
- gid: 960
kafka:
user.present:
- uid: 960
- gid: 960
kafka_sbin_tools:
file.recurse:
- name: /usr/sbin
- source: salt://kafka/tools/sbin
- user: 960
- group: 960
- file_mode: 755
kafka_log_dir:
file.directory:
- name: /opt/so/log/kafka
- user: 960
- group: 960
- makedirs: True
kafka_data_dir:
file.directory:
- name: /nsm/kafka/data
- user: 960
- group: 960
- makedirs: True
{% for sc in ['server', 'client'] %}
kafka_kraft_{{sc}}_properties:
file.managed:
- source: salt://kafka/etc/{{sc}}.properties.jinja
- name: /opt/so/conf/kafka/{{sc}}.properties
- template: jinja
- user: 960
- group: 960
- makedirs: True
# show_changes disabled: rendered properties contain the keystore password.
- show_changes: False
{% endfor %}
# NOTE(review): presumably removes stale KRaft quorum state so the node can
# rejoin after voter/config changes — confirm this is safe on every change.
reset_quorum_on_changes:
cmd.run:
- name: rm -f /nsm/kafka/data/__cluster_metadata-0/quorum-state
- onchanges:
- file: /opt/so/conf/kafka/server.properties
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
+62
View File
@@ -0,0 +1,62 @@
# Default Kafka settings; merged with (and overridden by) the 'kafka' pillar
# in kafka/map.jinja. Keys use '_x_' in place of '.'; the property templates
# translate them back when rendering server/client .properties files.
kafka:
enabled: False
cluster_id:
password:
controllers:
reset:
config:
# Settings for nodes whose role includes 'broker'.
broker:
advertised_x_listeners:
auto_x_create_x_topics_x_enable: true
controller_x_quorum_x_voters:
default_x_replication_x_factor: 1
inter_x_broker_x_listener_x_name: BROKER
listeners: BROKER://0.0.0.0:9092
listener_x_security_x_protocol_x_map: BROKER:SSL
log_x_dirs: /nsm/kafka/data
log_x_retention_x_check_x_interval_x_ms: 300000
log_x_retention_x_hours: 168
log_x_segment_x_bytes: 1073741824
node_x_id:
num_x_io_x_threads: 8
num_x_network_x_threads: 3
num_x_partitions: 3
num_x_recovery_x_threads_x_per_x_data_x_dir: 1
offsets_x_topic_x_replication_x_factor: 1
process_x_roles: broker
socket_x_receive_x_buffer_x_bytes: 102400
socket_x_request_x_max_x_bytes: 104857600
socket_x_send_x_buffer_x_bytes: 102400
ssl_x_keystore_x_location: /etc/pki/kafka.p12
ssl_x_keystore_x_type: PKCS12
ssl_x_keystore_x_password:
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
# PLACEHOLDER truststore passwords are dropped at render time when left
# unchanged (see kafka/config.map.jinja).
ssl_x_truststore_x_password: PLACEHOLDER
ssl_x_truststore_x_type: PEM
transaction_x_state_x_log_x_min_x_isr: 1
transaction_x_state_x_log_x_replication_x_factor: 1
# Settings rendered into client.properties for CLI/admin tooling.
client:
security_x_protocol: SSL
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
ssl_x_truststore_x_password: PLACEHOLDER
ssl_x_truststore_x_type: PEM
ssl_x_keystore_x_location: /etc/pki/kafka.p12
ssl_x_keystore_x_type: PKCS12
ssl_x_keystore_x_password:
# Settings for nodes whose role is 'controller' (KRaft quorum member).
controller:
controller_x_listener_x_names: CONTROLLER
controller_x_quorum_x_voters:
listeners: CONTROLLER://0.0.0.0:9093
listener_x_security_x_protocol_x_map: CONTROLLER:SSL
log_x_dirs: /nsm/kafka/data
log_x_retention_x_check_x_interval_x_ms: 300000
log_x_retention_x_hours: 168
log_x_segment_x_bytes: 1073741824
node_x_id:
process_x_roles: controller
ssl_x_keystore_x_location: /etc/pki/kafka.p12
ssl_x_keystore_x_type: PKCS12
ssl_x_keystore_x_password:
ssl_x_truststore_x_location: /etc/pki/java/sos/cacerts
ssl_x_truststore_x_password: PLACEHOLDER
ssl_x_truststore_x_type: PEM
+25
View File
@@ -0,0 +1,25 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
# Tears down the Kafka container and removes it from so-status monitoring.
{% from 'kafka/map.jinja' import KAFKAMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
so-kafka:
docker_container.absent:
- force: True
# Comment out the so-kafka entry so so-status stops reporting it.
so-kafka_so-status.disabled:
file.comment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-kafka$
- onlyif: grep -q '^so-kafka$' /opt/so/conf/so-status/so-status.conf
{# NOTE(review): 'and' binds tighter than 'or', so this fires when
(is_manager and enabled) OR pipeline == KAFKA, on any host in the latter
case — confirm the grouping is intended. #}
{% if GLOBALS.is_manager and KAFKAMERGED.enabled or GLOBALS.pipeline == "KAFKA" %}
# Reset the SOC pillar back to the default Redis pipeline.
ensure_default_pipeline:
cmd.run:
- name: |
/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False;
/usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/global/soc_global.sls global.pipeline REDIS
{% endif %}
+86
View File
@@ -0,0 +1,86 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."
# Runs the so-kafka container. Gated behind the licensed 'gmd' feature; falls
# back to kafka.disabled (and a failing state) when no license is present.
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %}
{% if 'gmd' in salt['pillar.get']('features', []) %}
include:
- elasticsearch.ca
- kafka.sostatus
- kafka.config
{# NOTE(review): kafka.storage declares file.absent on /nsm/kafka/data —
confirm including it here on the enabled path is intentional. #}
- kafka.storage
so-kafka:
docker_container.running:
- image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }}
- hostname: so-kafka
- name: so-kafka
- networks:
- sobridge:
- ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
- user: kafka
- environment:
KAFKA_HEAP_OPTS: -Xmx2G -Xms1G
KAFKA_OPTS: -javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml
# Map every kafka node hostname to its IP so quorum voters resolve in-container.
- extra_hosts:
{% for node in KAFKANODES %}
- {{ node }}:{{ KAFKANODES[node].ip }}
{% endfor %}
{% if DOCKER.containers['so-kafka'].extra_hosts %}
{% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %}
- {{ XTRAHOST }}
{% endfor %}
{% endif %}
- port_bindings:
{% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
- {{ BINDING }}
{% endfor %}
- binds:
- /etc/pki/kafka.p12:/etc/pki/kafka.p12:ro
- /etc/pki/tls/certs/intca.crt:/etc/pki/java/sos/cacerts:ro
- /nsm/kafka/data/:/nsm/kafka/data/:rw
- /opt/so/log/kafka:/opt/kafka/logs/:rw
- /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/server.properties:ro
- /opt/so/conf/kafka/client.properties:/opt/kafka/config/kraft/client.properties
# Re-trigger the container when either rendered properties file changes.
- watch:
{% for sc in ['server', 'client'] %}
- file: kafka_kraft_{{sc}}_properties
{% endfor %}
# Re-enable so-status monitoring of so-kafka.
delete_so-kafka_so-status.disabled:
file.uncomment:
- name: /opt/so/conf/so-status/so-status.conf
- regex: ^so-kafka$
{% else %}
{{sls}}_no_license_detected:
test.fail_without_changes:
- name: {{sls}}_no_license_detected
- comment:
- "Kafka for Guaranteed Message Delivery is a feature supported only for customers with a valid license.
Contact Security Onion Solutions, LLC via our website at https://securityonionsolutions.com
for more information about purchasing a license to enable this feature."
include:
- kafka.disabled
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
test.fail_without_changes:
- name: {{sls}}_state_not_allowed
{% endif %}
+7
View File
@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{# Renders client.properties: emits the KAFKACLIENT mapping as 'key: value'
lines and restores '.' in key names ('_x_' is used in pillar/defaults
because '.' is a path separator there). #}
{% from 'kafka/config.map.jinja' import KAFKACLIENT -%}
{{ KAFKACLIENT | yaml(False) | replace("_x_", ".") }}
+7
View File
@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{# Renders server.properties for this node's role (broker/controller) from the
KAFKACONFIG mapping built in kafka/config.map.jinja; '_x_' keys become '.' #}
{% from 'kafka/config.map.jinja' import KAFKACONFIG -%}
{{ KAFKACONFIG | yaml(False) | replace("_x_", ".") }}
@@ -0,0 +1,10 @@
{# Template for pillar/kafka/nodes.sls; rendered by kafka/nodes.sls with
COMBINED_KAFKANODES passed in via file.managed context. The 'role' key is
omitted for nodes whose role is unset (none). #}
kafka:
nodes:
{% for node, values in COMBINED_KAFKANODES.items() %}
{{ node }}:
ip: {{ values['ip'] }}
nodeid: {{ values['nodeid'] }}
{%- if values['role'] != none %}
role: {{ values['role'] }}
{%- endif %}
{% endfor %}
+24
View File
@@ -0,0 +1,24 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Note: Per the Elastic License 2.0, the second limitation states:
#
# "You may not move, change, disable, or circumvent the license key functionality
# in the software, and you may not remove or obscure any functionality in the
# software that is protected by the license key."
# Entry point: dispatches to kafka.enabled or kafka.disabled depending on the
# global pipeline setting and the kafka pillar.
{% from 'kafka/map.jinja' import KAFKAMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
{# Run kafka/nodes.sls before Kafka is enabled, so kafka nodes pillar is setup #}
{% if grains.role in ['so-manager','so-managersearch', 'so-standalone'] %}
- kafka.nodes
{% endif %}
{# Both the pipeline must be KAFKA and the kafka pillar enabled to start Kafka. #}
{% if GLOBALS.pipeline == "KAFKA" and KAFKAMERGED.enabled %}
- kafka.enabled
{% else %}
- kafka.disabled
{% endif %}
+10
View File
@@ -0,0 +1,10 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{# This is only used to determine if Kafka is enabled / disabled. Configuration is found in kafka/config.map.jinja #}
{# kafka/config.map.jinja depends on there being a kafka nodes pillar being populated #}
{# Pillar values deep-merge over (and override) the shipped defaults. #}
{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %}
{% set KAFKAMERGED = salt['pillar.get']('kafka', KAFKADEFAULTS.kafka, merge=True) %}
+88
View File
@@ -0,0 +1,88 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{# USED TO GENERATE PILLAR/KAFKA/NODES.SLS. #}
{# Assigns each Kafka-eligible grid node a stable, unique nodeid and a role
(broker / controller / broker,controller), merging newly-discovered nodes
with those already stored in the pillar so ids are never reassigned. #}
{% import_yaml 'kafka/defaults.yaml' as KAFKADEFAULTS %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set process_x_roles = KAFKADEFAULTS.kafka.config.broker.process_x_roles %}
{% set current_kafkanodes = salt.saltutil.runner(
    'mine.get',
    tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-receiver',
    fun='network.ip_addrs',
    tgt_type='compound') %}
{% set STORED_KAFKANODES = salt['pillar.get']('kafka:nodes', default=None) %}
{% set KAFKA_CONTROLLERS_PILLAR = salt['pillar.get']('kafka:controllers', default=None) %}
{% set existing_ids = [] %}
{# Check STORED_KAFKANODES for existing kafka nodes and pull their IDs so they are not reused across the grid #}
{% if STORED_KAFKANODES != none %}
{% for node, values in STORED_KAFKANODES.items() %}
{% if values.get('nodeid') %}
{% do existing_ids.append(values['nodeid']) %}
{% endif %}
{% endfor %}
{% endif %}
{# Create list of possible node ids #}
{% set all_possible_ids = range(1, 2000)|list %}
{# Create list of available node ids by looping through all_possible_ids and ensuring it isn't in existing_ids #}
{% set available_ids = [] %}
{% for id in all_possible_ids %}
{% if id not in existing_ids %}
{% do available_ids.append(id) %}
{% endif %}
{% endfor %}
{# Collect kafka eligible nodes and check if they're already in STORED_KAFKANODES to avoid potentially reassigning a nodeid #}
{# NOTE(review): assumes minion ids have the form '<hostname>_<role>' and that
the first mine-reported IP (ip[0]) is the reachable one — confirm. #}
{% set NEW_KAFKANODES = {} %}
{% for minionid, ip in current_kafkanodes.items() %}
{% set hostname = minionid.split('_')[0] %}
{% if not STORED_KAFKANODES or hostname not in STORED_KAFKANODES %}
{# pop(0) hands out the lowest free id #}
{% set new_id = available_ids.pop(0) %}
{% do NEW_KAFKANODES.update({hostname: {'nodeid': new_id, 'ip': ip[0], 'role': process_x_roles }}) %}
{% endif %}
{% endfor %}
{# Combine STORED_KAFKANODES and NEW_KAFKANODES for writing to the pillar/kafka/nodes.sls #}
{# Stored entries are applied last, so they win over newly-computed ones. #}
{% set COMBINED_KAFKANODES = {} %}
{% for node, details in NEW_KAFKANODES.items() %}
{% do COMBINED_KAFKANODES.update({node: details}) %}
{% endfor %}
{% if STORED_KAFKANODES != none %}
{% for node, details in STORED_KAFKANODES.items() %}
{% do COMBINED_KAFKANODES.update({node: details}) %}
{% endfor %}
{% endif %}
{# Update the process_x_roles value for any host in the kafka_controllers_pillar configured from SOC UI #}
{% set ns = namespace(has_controller=false) %}
{% if KAFKA_CONTROLLERS_PILLAR != none %}
{% set KAFKA_CONTROLLERS_PILLAR_LIST = KAFKA_CONTROLLERS_PILLAR.split(',') %}
{% for hostname in KAFKA_CONTROLLERS_PILLAR_LIST %}
{% if hostname in COMBINED_KAFKANODES %}
{% do COMBINED_KAFKANODES[hostname].update({'role': 'controller'}) %}
{% set ns.has_controller = true %}
{% endif %}
{% endfor %}
{# Any node not named in the controllers pillar is demoted to plain broker. #}
{% for hostname in COMBINED_KAFKANODES %}
{% if hostname not in KAFKA_CONTROLLERS_PILLAR_LIST %}
{% do COMBINED_KAFKANODES[hostname].update({'role': 'broker'}) %}
{% endif %}
{% endfor %}
{# If the kafka_controllers_pillar is NOT empty check that atleast one node contains the controller role.
otherwise default to GLOBALS.manager having broker,controller role #}
{% if not ns.has_controller %}
{% do COMBINED_KAFKANODES[GLOBALS.manager].update({'role': 'broker,controller'}) %}
{% endif %}
{# If kafka_controllers_pillar is empty, default to having grid manager as 'broker,controller'
so there is always atleast 1 controller in the cluster #}
{% else %}
{% do COMBINED_KAFKANODES[GLOBALS.manager].update({'role': 'broker,controller'}) %}
{% endif %}
+18
View File
@@ -0,0 +1,18 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{# Import the merged node map (stored + newly discovered Kafka nodes) computed in nodes.map.jinja. #}
{% from 'kafka/nodes.map.jinja' import COMBINED_KAFKANODES %}
{# NOTE(review): kafka_cluster_id is set here but never referenced in this state — confirm whether it can be removed. #}
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id', default=None) %}

{# Write Kafka pillar, so all grid members have access to nodeid of other kafka nodes and their roles #}
write_kafka_pillar_yaml:
  file.managed:
    - name: /opt/so/saltstack/local/pillar/kafka/nodes.sls
    - mode: 644
    - user: socore
    - source: salt://kafka/files/managed_node_pillar.jinja
    - template: jinja
    - context:
        COMBINED_KAFKANODES: {{ COMBINED_KAFKANODES }}
+9
View File
@@ -0,0 +1,9 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Remove all on-disk Kafka data under /nsm/kafka/data/.
# This is destructive and cannot be undone; applied as part of the Kafka reset flow.
wipe_kafka_data:
  file.absent:
    - name: /nsm/kafka/data/
    # force is required because file.absent is removing a non-empty directory tree
    - force: True
+209
View File
@@ -0,0 +1,209 @@
kafka:
enabled:
description: Set to True to enable Kafka. To avoid grid problems, do not enable Kafka until the related configuration is in place. Requires a valid Security Onion license key.
helpLink: kafka.html
cluster_id:
description: The ID of the Kafka cluster.
readonly: True
advanced: True
sensitive: True
helpLink: kafka.html
password:
description: The password to use for the Kafka certificates.
sensitive: True
helpLink: kafka.html
controllers:
description: A comma-separated list of hostnames that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. Note that only manager and receiver nodes are eligible to run Kafka. This configuration needs to be set before enabling Kafka. Failure to do so may result in Kafka topics becoming unavailable requiring manual intervention to restore functionality or reset Kafka, either of which can result in data loss.
forcedType: "string"
helpLink: kafka.html
reset:
description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed.
advanced: True
helpLink: kafka.html
config:
broker:
advertised_x_listeners:
description: Specify the list of listeners (hostname and port) that Kafka brokers provide to clients for communication.
title: advertised.listeners
helpLink: kafka.html
auto_x_create_x_topics_x_enable:
description: Enable the auto creation of topics.
title: auto.create.topics.enable
forcedType: bool
helpLink: kafka.html
default_x_replication_x_factor:
description: The default replication factor for automatically created topics. This value must be less than the amount of brokers in the cluster. Hosts specified in controllers should not be counted towards total broker count.
title: default.replication.factor
forcedType: int
helpLink: kafka.html
inter_x_broker_x_listener_x_name:
description: The name of the listener used for inter-broker communication.
title: inter.broker.listener.name
helpLink: kafka.html
listeners:
description: Set of URIs to listen on and their listener names, in a comma-separated list.
helpLink: kafka.html
listener_x_security_x_protocol_x_map:
description: Comma-separated mapping of listener names to security protocols.
title: listener.security.protocol.map
helpLink: kafka.html
log_x_dirs:
description: Where Kafka logs are stored within the Docker container.
title: log.dirs
helpLink: kafka.html
log_x_retention_x_check_x_interval_x_ms:
description: Frequency at which log files are checked if they are qualified for deletion.
title: log.retention.check.interval.ms
helpLink: kafka.html
log_x_retention_x_hours:
description: How long, in hours, a log file is kept.
title: log.retention.hours
forcedType: int
helpLink: kafka.html
log_x_segment_x_bytes:
description: The maximum allowable size for a log file.
title: log.segment.bytes
forcedType: int
helpLink: kafka.html
num_x_io_x_threads:
description: The number of threads used by Kafka.
title: num.io.threads
forcedType: int
helpLink: kafka.html
num_x_network_x_threads:
description: The number of threads used for network communication.
title: num.network.threads
forcedType: int
helpLink: kafka.html
num_x_partitions:
description: The number of log partitions assigned per topic.
title: num.partitions
forcedType: int
helpLink: kafka.html
num_x_recovery_x_threads_x_per_x_data_x_dir:
description: The number of threads used for log recovery at startup and flushing at shutdown. This number of threads is used per data directory.
title: num.recovery.threads.per.data.dir
forcedType: int
helpLink: kafka.html
offsets_x_topic_x_replication_x_factor:
description: The offsets topic replication factor.
title: offsets.topic.replication.factor
forcedType: int
helpLink: kafka.html
process_x_roles:
description: The role performed by Kafka brokers.
title: process.roles
readonly: True
helpLink: kafka.html
socket_x_receive_x_buffer_x_bytes:
description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default.
title: socket.receive.buffer.bytes
#forcedType: int - soc needs to allow -1 as an int before we can use this
helpLink: kafka.html
socket_x_request_x_max_x_bytes:
description: The maximum bytes allowed for a request to the socket.
title: socket.request.max.bytes
forcedType: int
helpLink: kafka.html
socket_x_send_x_buffer_x_bytes:
description: Size, in bytes of the SO_SNDBUF buffer. A value of -1 will use the OS default.
title: socket.send.buffer.bytes
#forcedType: int - soc needs to allow -1 as an int before we can use this
helpLink: kafka.html
ssl_x_keystore_x_location:
description: The key store file location within the Docker container.
title: ssl.keystore.location
helpLink: kafka.html
ssl_x_keystore_x_password:
description: The key store file password. Invalid for PEM format.
title: ssl.keystore.password
sensitive: True
helpLink: kafka.html
ssl_x_keystore_x_type:
description: The key store file format.
title: ssl.keystore.type
regex: ^(JKS|PKCS12|PEM)$
helpLink: kafka.html
ssl_x_truststore_x_location:
description: The trust store file location within the Docker container.
title: ssl.truststore.location
helpLink: kafka.html
ssl_x_truststore_x_password:
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
title: ssl.truststore.password
sensitive: True
helpLink: kafka.html
transaction_x_state_x_log_x_min_x_isr:
description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees. For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write.
title: transaction.state.log.min.isr
forcedType: int
helpLink: kafka.html
transaction_x_state_x_log_x_replication_x_factor:
description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite.
title: transaction.state.log.replication.factor
forcedType: int
helpLink: kafka.html
client:
security_x_protocol:
description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
title: security.protocol
regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)
helpLink: kafka.html
ssl_x_keystore_x_location:
description: The key store file location within the Docker container.
title: ssl.keystore.location
helpLink: kafka.html
ssl_x_keystore_x_password:
description: The key store file password. Invalid for PEM format.
title: ssl.keystore.password
sensitive: True
helpLink: kafka.html
ssl_x_keystore_x_type:
description: The key store file format.
title: ssl.keystore.type
regex: ^(JKS|PKCS12|PEM)$
helpLink: kafka.html
ssl_x_truststore_x_location:
description: The trust store file location within the Docker container.
title: ssl.truststore.location
helpLink: kafka.html
ssl_x_truststore_x_password:
description: The trust store file password. If null, the trust store file is still used, but integrity checking is disabled. Invalid for PEM format.
title: ssl.truststore.password
sensitive: True
helpLink: kafka.html
controller:
controller_x_listener_x_names:
description: Set listeners used by the controller in a comma-separated list.
title: controller.listener.names
helpLink: kafka.html
listeners:
description: Set of URIs to listen on and their listener names, in a comma-separated list.
helpLink: kafka.html
listener_x_security_x_protocol_x_map:
description: Comma-separated mapping of listener names to security protocols.
title: listener.security.protocol.map
helpLink: kafka.html
log_x_dirs:
description: Where Kafka logs are stored within the Docker container.
title: log.dirs
helpLink: kafka.html
log_x_retention_x_check_x_interval_x_ms:
description: Frequency at which log files are checked if they are qualified for deletion.
title: log.retention.check.interval.ms
helpLink: kafka.html
log_x_retention_x_hours:
description: How long, in hours, a log file is kept.
title: log.retention.hours
forcedType: int
helpLink: kafka.html
log_x_segment_x_bytes:
description: The maximum allowable size for a log file.
title: log.segment.bytes
forcedType: int
helpLink: kafka.html
process_x_roles:
description: The role performed by controller node.
title: process.roles
readonly: True
helpLink: kafka.html
+21
View File
@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{# Register the so-kafka container with so-status so grid health checks monitor it. #}
{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

append_so-kafka_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-kafka
    # grep guard keeps the append idempotent across repeated state runs
    - unless: grep -q so-kafka /opt/so/conf/so-status/so-status.conf

{% else %}

{# Fail loudly when this state is applied to a role that is not allowed to run it. #}
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
+30
View File
@@ -0,0 +1,30 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set kafka_cluster_id = salt['pillar.get']('kafka:cluster_id') %}

{# Initialize kafka storage if it doesn't already exist. Just looking for meta.properties in /nsm/kafka/data #}
{% if not salt['file.file_exists']('/nsm/kafka/data/meta.properties') %}
{# One-shot container that runs kafka-storage.sh to format /nsm/kafka/data with the cluster id. #}
kafka_storage_init:
  cmd.run:
    - name: |
        docker run -v /nsm/kafka/data:/nsm/kafka/data -v /opt/so/conf/kafka/server.properties:/opt/kafka/config/kraft/newserver.properties --name so-kafkainit --user root --entrypoint /opt/kafka/bin/kafka-storage.sh {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-kafka:{{ GLOBALS.so_version }} format -t {{ kafka_cluster_id }} -c /opt/kafka/config/kraft/newserver.properties

{# Remove the exited init container so its name is free for any future re-initialization. #}
kafka_rm_kafkainit:
  cmd.run:
    - name: |
        docker rm so-kafkainit
{% endif %}

{% else %}

{# Fail loudly when this state is applied to a role that is not allowed to run it. #}
{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
+47
View File
@@ -0,0 +1,47 @@
#! /bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Thin wrapper that runs one of the Kafka CLI tools (kafka-*.sh) inside the
# running so-kafka container, passing through any additional arguments.

if [ -z "$NOROOT" ]; then
  # Check for prerequisites
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
fi

# Print usage, including the list of tools currently shipped in the container.
function usage() {
  echo -e "\nUsage: $0 <script> [options]"
  echo ""
  echo "Available scripts:"
  show_available_kafka_cli_tools
}

# List the kafka-* scripts available inside the so-kafka container.
function show_available_kafka_cli_tools(){
  docker exec so-kafka ls /opt/kafka/bin | grep kafka
}

# Quoted "$1": the original unquoted test broke (or mis-evaluated) when the
# argument was empty-with-spaces or contained glob characters.
if [ -z "$1" ]; then
  usage
  exit 1
fi

available_tools=$(show_available_kafka_cli_tools)

# Only allow scripts that actually exist in the container; this prevents
# arbitrary path injection via the first argument (e.g. "../../bin/sh").
script_exists=false
for script in $available_tools; do
  if [ "$script" == "$1" ]; then
    script_exists=true
    break
  fi
done

if [ "$script_exists" == true ]; then
  # Quote the tool path so an unexpected character in $1 cannot split the argument.
  docker exec so-kafka "/opt/kafka/bin/$1" "${@:2}"
else
  echo -e "\nInvalid script: $1"
  usage
  exit 1
fi
@@ -0,0 +1,87 @@
#! /bin/bash
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Operator helper for common Kafka maintenance tasks (topic partition counts,
# topic listing) executed through so-kafka-cli against the grid's brokers.

if [ -z "$NOROOT" ]; then
  # Check for prerequisites
  if [ "$(id -u)" -ne 0 ]; then
    echo "This script must be run using sudo!"
    exit 1
  fi
fi

usage() {
  cat <<USAGE_EOF
Usage: $0 <operation> [parameters]

Where <operation> is one of the following:

  topic-partitions: Increase the number of partitions for a Kafka topic
    Required arguments: topic-partitions <topic name> <# partitions>
    Example: $0 topic-partitions suricata-topic 6

  list-topics: List of Kafka topics
    Example: $0 list-topics

USAGE_EOF
  exit 1
}

if [[ $# -lt 1 || $1 == --help || $1 == -h ]]; then
  usage
fi

kafka_client_config="/opt/kafka/config/kraft/client.properties"

too_few_arguments() {
  echo -e "\nMissing one or more required arguments!\n"
  usage
}

# Resolve the broker list (host:9092,...) from the kafka:nodes pillar, caching
# the result for 2 hours to avoid a salt-call on every invocation.
# Sets: brokers (comma-separated bootstrap server list).
get_kafka_brokers() {
  brokers_cache="/opt/so/state/kafka_brokers"
  broker_port="9092"
  # Fixed stray leading slash: the original checked "/$brokers_cache" (//opt/...),
  # which only worked by accident of path normalization.
  if [[ ! -f "$brokers_cache" ]] || [[ $(find "$brokers_cache" -mmin +120) ]]; then
    echo "Refreshing Kafka brokers list"
    salt-call pillar.get kafka:nodes --out=json | jq -r --arg broker_port "$broker_port" '.local | to_entries[] | select(.value.role | contains("broker")) | "\(.value.ip):\($broker_port)"' | paste -sd "," - > "$brokers_cache"
  else
    echo "Using cached Kafka brokers list"
  fi
  brokers=$(cat "$brokers_cache")
}

# Alter the partition count for $topic to $partition_count, then describe it.
increase_topic_partitions() {
  get_kafka_brokers
  # BUGFIX: the original captured the command's stdout into a variable and then
  # ran `if $command`, which (a) executed the tool's *output* as a command and
  # (b) always took the success branch when the output was empty. Run the tool
  # directly and branch on its exit status instead.
  if so-kafka-cli kafka-topics.sh --bootstrap-server "$brokers" --command-config "$kafka_client_config" --alter --topic "$topic" --partitions "$partition_count"; then
    echo -e "Successfully increased the number of partitions for topic $topic to $partition_count\n"
    so-kafka-cli kafka-topics.sh --bootstrap-server "$brokers" --command-config "$kafka_client_config" --describe --topic "$topic"
  fi
}

# Print all non-internal topics, sorted.
get_kafka_topics_list() {
  get_kafka_brokers
  so-kafka-cli kafka-topics.sh --bootstrap-server "$brokers" --command-config "$kafka_client_config" --exclude-internal --list | sort
}

operation=$1
case "${operation}" in
  "topic-partitions")
    if [[ $# -lt 3 ]]; then
      too_few_arguments
    fi
    topic=$2
    partition_count=$3
    increase_topic_partitions
    ;;
  "list-topics")
    get_kafka_topics_list
    ;;
  *)
    usage
    ;;
esac
+1
View File
@@ -37,6 +37,7 @@ logstash:
- so/0900_input_redis.conf.jinja
- so/9805_output_elastic_agent.conf.jinja
- so/9900_output_endgame.conf.jinja
- so/0800_input_kafka.conf.jinja
custom0: []
custom1: []
custom2: []
+4 -1
View File
@@ -75,10 +75,13 @@ so-logstash:
{% else %}
- /etc/pki/tls/certs/intca.crt:/usr/share/filebeat/ca.crt:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode'] %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-searchnode' ] %}
- /opt/so/conf/ca/cacerts:/etc/pki/ca-trust/extracted/java/cacerts:ro
- /opt/so/conf/ca/tls-ca-bundle.pem:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-searchnode'] %}
- /etc/pki/kafka-logstash.p12:/usr/share/logstash/kafka-logstash.p12:ro
{% endif %}
{% if GLOBALS.role == 'so-eval' %}
- /nsm/zeek:/nsm/zeek:ro
- /nsm/suricata:/suricata:ro
+5 -1
View File
@@ -4,9 +4,13 @@
# Elastic License 2.0.
{% from 'logstash/map.jinja' import LOGSTASH_MERGED %}
{% from 'kafka/map.jinja' import KAFKAMERGED %}
include:
{% if LOGSTASH_MERGED.enabled %}
{# Disable logstash when Kafka is enabled except when the role is standalone #}
{% if LOGSTASH_MERGED.enabled and grains.role == 'so-standalone' %}
- logstash.enabled
{% elif LOGSTASH_MERGED.enabled and not KAFKAMERGED.enabled %}
- logstash.enabled
{% else %}
- logstash.disabled
@@ -0,0 +1,38 @@
{# Logstash Kafka input for search nodes: consumes Security Onion topics from
   every pillar-defined node whose role contains 'broker', over mutual-TLS. #}
{%- set kafka_password = salt['pillar.get']('kafka:password') %}
{%- set kafka_brokers = salt['pillar.get']('kafka:nodes', {}) %}
{%- set brokers = [] %}
{# If no kafka nodes are defined in the pillar, render nothing at all. #}
{%- if kafka_brokers %}
{%- for key, values in kafka_brokers.items() %}
{%- if 'broker' in values['role'] %}
{%- do brokers.append(key ~ ':9092') %}
{%- endif %}
{%- endfor %}
{%- set bootstrap_servers = ','.join(brokers) %}
input {
  kafka {
    codec => json
    {# Subscribe to every topic ending in -securityonion #}
    topics_pattern => '.*-securityonion$'
    group_id => 'searchnodes'
    consumer_threads => 3
    client_id => '{{ GLOBALS.hostname }}'
    security_protocol => 'SSL'
    bootstrap_servers => '{{ bootstrap_servers }}'
    ssl_keystore_location => '/usr/share/logstash/kafka-logstash.p12'
    ssl_keystore_password => '{{ kafka_password }}'
    ssl_keystore_type => 'PKCS12'
    {# Truststore is the container's Java cacerts bundle; 'changeit' is the stock JKS password. #}
    ssl_truststore_location => '/etc/pki/ca-trust/extracted/java/cacerts'
    ssl_truststore_password => 'changeit'
    decorate_events => true
    tags => [ "elastic-agent", "input-{{ GLOBALS.hostname}}", "kafka" ]
  }
}
filter {
  {# Expose Kafka metadata to downstream filters by renaming @metadata (which
     Logstash would otherwise drop on output) into a persisted [metadata] field. #}
  if ![metadata] {
    mutate {
      rename => { "@metadata" => "metadata" }
    }
  }
}
{% endif %}
+3 -1
View File
@@ -2,4 +2,6 @@ manager:
reposync:
enabled: True
hour: 3
minute: 0
minute: 0
additionalCA: ''
insecureSkipVerify: False
+9
View File
@@ -73,6 +73,15 @@ manager_sbin:
- exclude_pat:
- "*_test.py"
manager_sbin_jinja:
file.recurse:
- name: /usr/sbin/
- source: salt://manager/tools/sbin_jinja/
- user: socore
- group: socore
- file_mode: 755
- template: jinja
so-repo-file:
file.managed:
- name: /opt/so/conf/reposync/repodownload.conf
+7
View File
@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}

{# Load the manager defaults and deep-merge any 'manager' pillar overrides on top
   (merge=True lets pillar values override individual nested keys). #}
{% import_yaml 'manager/defaults.yaml' as MANAGERDEFAULTS %}
{% set MANAGERMERGED = salt['pillar.get']('manager', MANAGERDEFAULTS.manager, merge=True) %}
+16 -3
View File
@@ -7,7 +7,7 @@ manager:
hour:
description: The hour of the day in which the repo sync takes place.
global: True
helpLink: soup.html
helpLink: soup.html
minute:
description: The minute within the hour to run the repo sync.
global: True
@@ -16,11 +16,24 @@ manager:
description: Enable elastalert 1=enabled 0=disabled.
global: True
helpLink: elastalert.html
no_proxy:
description: String of hosts to ignore the proxy settings for.
no_proxy:
description: String of hosts to ignore the proxy settings for.
global: True
helpLink: proxy.html
proxy:
description: Proxy server to use for updates.
global: True
helpLink: proxy.html
additionalCA:
description: Additional CA certificates to trust in PEM format.
global: True
advanced: True
multiline: True
forcedType: string
helpLink: proxy.html
insecureSkipVerify:
description: Disable TLS verification for outgoing requests. This will make your installation less secure to MITM attacks. Recommended only for debugging purposes.
advanced: True
forcedType: bool
global: True
helpLink: proxy.html
View File
+6 -2
View File
@@ -112,8 +112,8 @@ function testMinion() {
result=$?
# If this so-minion script is not running on the given minion ID, run so-test remotely on the sensor as well
local_id=$(lookup_grain id)
if [[ ! "$local_id" =~ "${MINION_ID}_" ]]; then
local_id=$(lookup_grain id)
if [[ ! "$local_id" =~ "${MINION_ID}_" && "$local_id" != "${MINION_ID}" ]]; then
salt "$MINION_ID" cmd.run 'so-test'
result=$?
fi
@@ -604,6 +604,10 @@ function updateMineAndApplyStates() {
if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then
salt-run state.orch orch.container_download pillar="{'setup': {'newnode': $MINION_ID }}" > /dev/null 2>&1 &
fi
if [[ "$NODETYPE" == "RECEIVER" ]]; then
# Setup nodeid for Kafka
salt-call state.apply kafka.nodes queue=True
fi
# $MINIONID is the minion id of the manager and $MINION_ID is the target node or the node being configured
salt-run state.orch orch.deploy_newnode pillar="{'setup': {'manager': $MINIONID, 'newnode': $MINION_ID }}" > /dev/null 2>&1 &
}
View File
+51 -23
View File
@@ -14,19 +14,20 @@ lockFile = "/tmp/so-yaml.lock"
def showUsage(args):
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]))
print(' General commands:')
print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.')
print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.')
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.')
print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.')
print(' help - Prints this usage information.')
print('')
print(' Where:')
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml')
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2')
print(' VALUE - Value to set for a given key')
print(' LISTITEM - Item to append to a given key\'s list value')
print('Usage: {} <COMMAND> <YAML_FILE> [ARGS...]'.format(sys.argv[0]), file=sys.stderr)
print(' General commands:', file=sys.stderr)
print(' append - Append a list item to a yaml key, if it exists and is a list. Requires KEY and LISTITEM args.', file=sys.stderr)
print(' add - Add a new key and set its value. Fails if key already exists. Requires KEY and VALUE args.', file=sys.stderr)
print(' get - Displays (to stdout) the value stored in the given key. Requires KEY arg.', file=sys.stderr)
print(' remove - Removes a yaml key, if it exists. Requires KEY arg.', file=sys.stderr)
print(' replace - Replaces (or adds) a new key and set its value. Requires KEY and VALUE args.', file=sys.stderr)
print(' help - Prints this usage information.', file=sys.stderr)
print('', file=sys.stderr)
print(' Where:', file=sys.stderr)
print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml', file=sys.stderr)
print(' KEY - YAML key, does not support \' or " characters at this time. Ex: level1.level2', file=sys.stderr)
print(' VALUE - Value to set for a given key', file=sys.stderr)
print(' LISTITEM - Item to append to a given key\'s list value', file=sys.stderr)
sys.exit(1)
@@ -38,7 +39,7 @@ def loadYaml(filename):
def writeYaml(filename, content):
file = open(filename, "w")
return yaml.dump(content, file)
return yaml.safe_dump(content, file)
def appendItem(content, key, listItem):
@@ -49,15 +50,15 @@ def appendItem(content, key, listItem):
try:
content[key].append(listItem)
except AttributeError:
print("The existing value for the given key is not a list. No action was taken on the file.")
print("The existing value for the given key is not a list. No action was taken on the file.", file=sys.stderr)
return 1
except KeyError:
print("The key provided does not exist. No action was taken on the file.")
print("The key provided does not exist. No action was taken on the file.", file=sys.stderr)
return 1
def convertType(value):
if len(value) > 0 and (not value.startswith("0") or len(value) == 1):
if isinstance(value, str) and len(value) > 0 and (not value.startswith("0") or len(value) == 1):
if "." in value:
try:
value = float(value)
@@ -83,7 +84,7 @@ def append(args):
if len(args) != 3:
print('Missing filename, key arg, or list item to append', file=sys.stderr)
showUsage(None)
return
return 1
filename = args[0]
key = args[1]
@@ -112,7 +113,7 @@ def add(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
return 1
filename = args[0]
key = args[1]
@@ -137,7 +138,7 @@ def remove(args):
if len(args) != 2:
print('Missing filename or key arg', file=sys.stderr)
showUsage(None)
return
return 1
filename = args[0]
key = args[1]
@@ -153,7 +154,7 @@ def replace(args):
if len(args) != 3:
print('Missing filename, key arg, and/or value', file=sys.stderr)
showUsage(None)
return
return 1
filename = args[0]
key = args[1]
@@ -167,6 +168,32 @@ def replace(args):
return 0
def getKeyValue(content, key):
    """Return the value stored at a dotted key path (e.g. "a.b.c") in nested dicts.

    Args:
        content: Parsed YAML content; expected to be a dict at each level of the path.
        key: Dotted key path; each dot descends one level.

    Returns:
        The value at the path, or None when any segment is missing or when a
        non-dict value is reached before the path is exhausted.
    """
    # Robustness fix: descending into a non-dict (e.g. key "a.b" where "a" holds
    # a scalar) previously raised AttributeError on .get(); report "not found".
    if not isinstance(content, dict):
        return None
    pieces = key.split(".", 1)
    if len(pieces) > 1 and pieces[0] in content:
        return getKeyValue(content[pieces[0]], pieces[1])
    return content.get(key, None)
def get(args):
    """Implement the 'get' command: print the YAML value stored at a dotted key.

    Args:
        args: Two-element list of [filename, key] — the YAML file to read and
              the dotted key path to look up.

    Returns:
        0 on success (value printed to stdout as YAML), 1 on bad arguments
        (usage printed to stderr), 2 when the key is not found in the file.
    """
    if len(args) != 2:
        print('Missing filename or key arg', file=sys.stderr)
        showUsage(None)
        return 1

    filename = args[0]
    key = args[1]

    content = loadYaml(filename)
    output = getKeyValue(content, key)
    if output is None:
        # Diagnostics go to stderr so stdout carries only the value itself.
        print("Not found", file=sys.stderr)
        return 2

    # safe_dump renders scalars, lists, and mappings uniformly as YAML text.
    print(yaml.safe_dump(output))
    return 0
def main():
args = sys.argv[1:]
@@ -178,6 +205,7 @@ def main():
"help": showUsage,
"add": add,
"append": append,
"get": get,
"remove": remove,
"replace": replace,
}
@@ -195,11 +223,11 @@ def main():
break
except Exception:
if lockAttempts == 1:
print("Waiting for lock file to be released from another process...")
print("Waiting for lock file to be released from another process...", file=sys.stderr)
time.sleep(2)
if lockAttempts == maxAttempts:
print("Lock file (" + lockFile + ") could not be created; proceeding without lock.")
print("Lock file (" + lockFile + ") could not be created; proceeding without lock.", file=sys.stderr)
cmd = commands.get(args[0], showUsage)
code = cmd(args[1:])
+98 -24
View File
@@ -15,40 +15,40 @@ class TestRemove(unittest.TestCase):
def test_main_missing_input(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd"]
soyaml.main()
sysmock.assert_called_once_with(1)
self.assertIn(mock_stdout.getvalue(), "Usage:")
self.assertIn("Usage:", mock_stderr.getvalue())
def test_main_help_locked(self):
filename = "/tmp/so-yaml.lock"
file = open(filename, "w")
file.write = "fake lock file"
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
with patch('time.sleep', new=MagicMock()) as mock_sleep:
sys.argv = ["cmd", "help"]
soyaml.main()
sysmock.assert_called()
mock_sleep.assert_called_with(2)
self.assertIn(mock_stdout.getvalue(), "Usage:")
self.assertIn("Usage:", mock_stderr.getvalue())
def test_main_help(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "help"]
soyaml.main()
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Usage:")
self.assertIn("Usage:", mock_stderr.getvalue())
def test_remove_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "help"]
soyaml.remove(["file"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
self.assertIn("Missing filename or key arg\n", mock_stderr.getvalue())
def test_remove(self):
filename = "/tmp/so-yaml_test-remove.yaml"
@@ -97,7 +97,7 @@ class TestRemove(unittest.TestCase):
def test_remove_missing_args(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
filename = "/tmp/so-yaml_test-remove.yaml"
file = open(filename, "w")
file.write("{key1: { child1: 123, child2: abc }, key2: false}")
@@ -112,15 +112,15 @@ class TestRemove(unittest.TestCase):
expected = "{key1: { child1: 123, child2: abc }, key2: false}"
self.assertEqual(actual, expected)
sysmock.assert_called_once_with(1)
self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n")
self.assertIn("Missing filename or key arg\n", mock_stderr.getvalue())
def test_append_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "help"]
soyaml.append(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, or list item to append\n")
self.assertIn("Missing filename, key arg, or list item to append\n", mock_stderr.getvalue())
def test_append(self):
filename = "/tmp/so-yaml_test-remove.yaml"
@@ -173,11 +173,11 @@ class TestRemove(unittest.TestCase):
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "append", filename, "key4", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")
self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue())
def test_append_key_noexist_deep(self):
filename = "/tmp/so-yaml_test-append.yaml"
@@ -186,11 +186,11 @@ class TestRemove(unittest.TestCase):
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "append", filename, "key1.child2.deep3", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The key provided does not exist. No action was taken on the file.\n")
self.assertEqual("The key provided does not exist. No action was taken on the file.\n", mock_stderr.getvalue())
def test_append_key_nonlist(self):
filename = "/tmp/so-yaml_test-append.yaml"
@@ -199,11 +199,11 @@ class TestRemove(unittest.TestCase):
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "append", filename, "key1", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())
def test_append_key_nonlist_deep(self):
filename = "/tmp/so-yaml_test-append.yaml"
@@ -212,11 +212,11 @@ class TestRemove(unittest.TestCase):
file.close()
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stdout', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "append", filename, "key1.child2.deep1", "h"]
soyaml.main()
sysmock.assert_called()
self.assertEqual(mock_stdout.getvalue(), "The existing value for the given key is not a list. No action was taken on the file.\n")
self.assertEqual("The existing value for the given key is not a list. No action was taken on the file.\n", mock_stderr.getvalue())
def test_add_key(self):
content = {}
@@ -244,11 +244,11 @@ class TestRemove(unittest.TestCase):
def test_add_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "help"]
soyaml.add(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
self.assertIn("Missing filename, key arg, and/or value\n", mock_stderr.getvalue())
def test_add(self):
filename = "/tmp/so-yaml_test-add.yaml"
@@ -296,11 +296,11 @@ class TestRemove(unittest.TestCase):
def test_replace_missing_arg(self):
with patch('sys.exit', new=MagicMock()) as sysmock:
with patch('sys.stderr', new=StringIO()) as mock_stdout:
with patch('sys.stderr', new=StringIO()) as mock_stderr:
sys.argv = ["cmd", "help"]
soyaml.replace(["file", "key"])
sysmock.assert_called()
self.assertIn(mock_stdout.getvalue(), "Missing filename, key arg, and/or value\n")
self.assertIn("Missing filename, key arg, and/or value\n", mock_stderr.getvalue())
def test_replace(self):
filename = "/tmp/so-yaml_test-add.yaml"
@@ -360,3 +360,77 @@ class TestRemove(unittest.TestCase):
self.assertEqual(soyaml.convertType("false"), False)
self.assertEqual(soyaml.convertType("FALSE"), False)
self.assertEqual(soyaml.convertType(""), "")
def test_get_int(self):
    """get should print an integer scalar value and return exit code 0."""
    yaml_path = "/tmp/so-yaml_test-get.yaml"
    with open(yaml_path, "w") as handle:
        handle.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
    with patch('sys.stdout', new=StringIO()) as captured:
        status = soyaml.get([yaml_path, "key1.child2.deep1"])
    self.assertEqual(status, 0)
    # YAML scalar output is followed by the document-end marker "...".
    self.assertIn("45\n...", captured.getvalue())
def test_get_str(self):
    """get should print a string scalar value and return exit code 0."""
    yaml_path = "/tmp/so-yaml_test-get.yaml"
    with open(yaml_path, "w") as handle:
        handle.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}")
    with patch('sys.stdout', new=StringIO()) as captured:
        status = soyaml.get([yaml_path, "key1.child2.deep1"])
    self.assertEqual(status, 0)
    # YAML scalar output is followed by the document-end marker "...".
    self.assertIn("hello\n...", captured.getvalue())
def test_get_list(self):
    """get on a list-valued key should print the YAML sequence form."""
    yaml_path = "/tmp/so-yaml_test-get.yaml"
    with open(yaml_path, "w") as handle:
        handle.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}")
    with patch('sys.stdout', new=StringIO()) as captured:
        status = soyaml.get([yaml_path, "key3"])
    self.assertEqual(status, 0)
    self.assertIn("- e\n- f\n- g\n", captured.getvalue())
def test_get_dict(self):
    """get on a mapping-valued key should print the nested YAML mapping."""
    yaml_path = "/tmp/so-yaml_test-get.yaml"
    with open(yaml_path, "w") as handle:
        handle.write("{key1: { child1: 123, child2: { deep1: \"hello\" } }, key2: false, key3: [e,f,g]}")
    with patch('sys.stdout', new=StringIO()) as captured:
        status = soyaml.get([yaml_path, "key1"])
    self.assertEqual(status, 0)
    self.assertIn("child1: 123\nchild2:\n deep1: hello\n", captured.getvalue())
def test_get_missing(self):
    """get on a nonexistent leaf key should return 2 and print nothing."""
    yaml_path = "/tmp/so-yaml_test-get.yaml"
    with open(yaml_path, "w") as handle:
        handle.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
    with patch('sys.stdout', new=StringIO()) as captured:
        status = soyaml.get([yaml_path, "key1.child2.deep3"])
    self.assertEqual(status, 2)
    self.assertEqual("", captured.getvalue())
def test_get_missing_parent(self):
    """get should also return 2 when an intermediate key is missing."""
    yaml_path = "/tmp/so-yaml_test-get.yaml"
    with open(yaml_path, "w") as handle:
        handle.write("{key1: { child1: 123, child2: { deep1: 45 } }, key2: false, key3: [e,f,g]}")
    with patch('sys.stdout', new=StringIO()) as captured:
        status = soyaml.get([yaml_path, "key1.child3.deep3"])
    self.assertEqual(status, 2)
    self.assertEqual("", captured.getvalue())
def test_get_usage(self):
    """get with no args should print a usage error to stderr and call exit(1)."""
    with patch('sys.exit', new=MagicMock()) as exit_mock, \
         patch('sys.stderr', new=StringIO()) as err_stream:
        status = soyaml.get([])
    self.assertEqual(status, 1)
    self.assertIn("Missing filename or key arg", err_stream.getvalue())
    exit_mock.assert_called_once_with(1)
+64
View File
@@ -19,6 +19,9 @@ SOUP_LOG=/root/soup.log
WHATWOULDYOUSAYYAHDOHERE=soup
whiptail_title='Security Onion UPdater'
NOTIFYCUSTOMELASTICCONFIG=false
# used to display messages to the user at the end of soup
declare -a FINAL_MESSAGE_QUEUE=()
check_err() {
local exit_code=$1
@@ -344,6 +347,29 @@ masterunlock() {
mv -v $BACKUPTOPFILE $TOPFILE
}
# 2.4.80 migration: remove the deprecated pillar key
# elasticsearch.index_settings.global_overrides.index_template.phases.
# If the key existed, queue a user-facing message (FINAL_MESSAGE_QUEUE is
# printed at the end of soup) pointing to the replacement policy.phases path.
phases_pillar_2_4_80() {
echo "Checking if pillar value: elasticsearch.index_settings.global_overrides.index_template.phases exists"
# Temporarily disable errexit: so-yaml.py's non-zero exit codes are
# meaningful here and are inspected via the case statement below.
set +e
PHASES=$(so-yaml.py get /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases)
case $? in
0)
# Key exists: remove it and queue an explanatory message for the user.
so-yaml.py remove /opt/so/saltstack/local/pillar/elasticsearch/soc_elasticsearch.sls elasticsearch.index_settings.global_overrides.index_template.phases
read -r -d '' msg <<- EOF
Found elasticsearch.index_settings.global_overrides.index_template.phases was set to:
${PHASES}
Removed unused pillar value: elasticsearch.index_settings.global_overrides.index_template.phases
To set policies, navigate to the SOC Grid Configuration UI at elasticsearch.index_settings.global_overrides.policy.phases
A backup of all pillar files was saved to /nsm/backup/
EOF
FINAL_MESSAGE_QUEUE+=("$msg")
;;
2) echo "Pillar elasticsearch.index_settings.global_overrides.index_template.phases does not exist. No action taken." ;;
*) echo "so-yaml.py returned something other than 0 or 2 exit code" ;; # we shouldn't see this
esac
# Restore errexit for the rest of soup.
set -e
}
preupgrade_changes() {
# This function is to add any new pillar items if needed.
echo "Checking to see if changes are needed."
@@ -358,6 +384,7 @@ preupgrade_changes() {
[[ "$INSTALLEDVERSION" == 2.4.40 ]] && up_to_2.4.50
[[ "$INSTALLEDVERSION" == 2.4.50 ]] && up_to_2.4.60
[[ "$INSTALLEDVERSION" == 2.4.60 ]] && up_to_2.4.70
[[ "$INSTALLEDVERSION" == 2.4.70 ]] && up_to_2.4.80
true
}
@@ -375,6 +402,7 @@ postupgrade_changes() {
[[ "$POSTVERSION" == 2.4.40 ]] && post_to_2.4.50
[[ "$POSTVERSION" == 2.4.50 ]] && post_to_2.4.60
[[ "$POSTVERSION" == 2.4.60 ]] && post_to_2.4.70
[[ "$POSTVERSION" == 2.4.70 ]] && post_to_2.4.80
true
}
@@ -448,6 +476,12 @@ post_to_2.4.70() {
POSTVERSION=2.4.70
}
# Post-upgrade step for 2.4.80: ensure the Elastic Fleet output policy
# reflects the Kafka settings, then record the completed version.
post_to_2.4.80() {
  printf '\nChecking if update to Elastic Fleet output policy is required\n\n'
  so-kafka-fleet-output-policy
  POSTVERSION=2.4.80
}
repo_sync() {
echo "Sync the local repo."
su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -592,9 +626,30 @@ up_to_2.4.70() {
suricata_idstools_migration
toggle_telemetry
add_detection_test_pillars
INSTALLEDVERSION=2.4.70
}
# Pre-upgrade pillar changes for 2.4.70 -> 2.4.80.
# Side effects: removes the deprecated global.pipeline pillar entry,
# creates /opt/so/saltstack/local/pillar/kafka/{soc,adv}_kafka.sls, seeds
# soc_kafka.sls with a random cluster id and password, and advances
# INSTALLEDVERSION. kafka_cluster_id/kafkapass remain global on purpose.
up_to_2.4.80() {
  phases_pillar_2_4_80

  # Kafka configuration changes
  # Global pipeline changes to REDIS or KAFKA: drop the old global.pipeline key.
  echo "Removing global.pipeline pillar configuration"
  sed -i '/pipeline:/d' /opt/so/saltstack/local/pillar/global/soc_global.sls

  # Kafka pillars. soc_kafka.sls is fully (re)generated below, so the
  # original redundant `touch` of it was dropped; adv_kafka.sls is only
  # created empty for later advanced configuration.
  mkdir -p /opt/so/saltstack/local/pillar/kafka
  touch /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls

  # Quote expansions (SC2086): get_random_value output must not be
  # word-split or glob-expanded when written into the pillar file.
  kafka_cluster_id=$(get_random_value 22)
  kafkapass=$(get_random_value)
  {
    echo 'kafka: '
    echo " cluster_id: $kafka_cluster_id"
    echo " password: $kafkapass"
  } > /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls

  INSTALLEDVERSION=2.4.80
}
add_detection_test_pillars() {
if [[ -n "$SOUP_INTERNAL_TESTING" ]]; then
echo "Adding detection pillar values for automated testing"
@@ -1255,6 +1310,15 @@ EOF
fi
# check if the FINAL_MESSAGE_QUEUE is not empty
if (( ${#FINAL_MESSAGE_QUEUE[@]} != 0 )); then
echo "The following additional information applies specifically to your grid:"
for m in "${FINAL_MESSAGE_QUEUE[@]}"; do
echo "$m"
echo
done
fi
echo "### soup has been served at $(date) ###"
}
+2 -1
View File
@@ -4,9 +4,10 @@
# Elastic License 2.0.
{% from 'redis/map.jinja' import REDISMERGED %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
include:
{% if REDISMERGED.enabled %}
{% if GLOBALS.pipeline == "REDIS" and REDISMERGED.enabled %}
- redis.enabled
{% else %}
- redis.disabled
@@ -2,7 +2,7 @@ mine_interval: 25
mine_functions:
network.ip_addrs:
- interface: {{ pillar.host.mainint }}
{% if grains.role in ['so-eval','so-import','so-manager','so-managersearch','so-standalone'] -%}
{%- if grains.role in ['so-eval','so-import','so-manager','so-managersearch','so-standalone'] %}
x509.get_pem_entries:
- glob_path: '/etc/pki/ca.crt'
{% endif -%}
+61
View File
@@ -30,4 +30,65 @@ engines:
'*':
- cmd.run:
cmd: /usr/sbin/so-rule-update
- files:
- /opt/so/saltstack/local/pillar/global/soc_global.sls
- /opt/so/saltstack/local/pillar/global/adv_global.sls
pillar: global.pipeline
default: REDIS
actions:
from:
'*':
to:
'KAFKA':
- cmd.run:
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled True
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
- cmd.run:
cmd: salt-call state.apply kafka.nodes
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate
'KAFKA':
to:
'REDIS':
- cmd.run:
cmd: /usr/sbin/so-yaml.py replace /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.enabled False
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.highstate
- files:
- /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
- /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
pillar: kafka.controllers
default: ''
actions:
from:
'*':
to:
'*':
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
- cmd.run:
cmd: salt-call state.apply kafka.nodes
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka
- cmd.run:
cmd: salt-call state.apply elasticfleet
- files:
- /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls
- /opt/so/saltstack/local/pillar/kafka/adv_kafka.sls
pillar: kafka.reset
default: ''
actions:
from:
'*':
to:
'YES_RESET_KAFKA':
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' saltutil.kill_all_jobs
- cmd.run:
cmd: salt -C 'G@role:so-standalone or G@role:so-manager or G@role:so-managersearch or G@role:so-receiver' state.apply kafka.disabled,kafka.reset
- cmd.run:
cmd: /usr/sbin/so-yaml.py remove /opt/so/saltstack/local/pillar/kafka/soc_kafka.sls kafka.reset
interval: 10
+6
View File
@@ -13,6 +13,9 @@ include:
- systemd.reload
- repo.client
- salt.mine_functions
{% if GLOBALS.role in GLOBALS.manager_roles %}
- ca
{% endif %}
{% if INSTALLEDSALTVERSION|string != SALTVERSION|string %}
@@ -98,5 +101,8 @@ salt_minion_service:
- file: mine_functions
{% if INSTALLEDSALTVERSION|string == SALTVERSION|string %}
- file: set_log_levels
{% endif %}
{% if GLOBALS.role in GLOBALS.manager_roles %}
- file: /etc/salt/minion.d/signing_policies.conf
{% endif %}
- order: last
+1 -1
View File
@@ -9,4 +9,4 @@ execute_checksum:
cmd.run:
- name: /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable
- onchanges:
- file: offload_script
- file: offload_script
+88 -3
View File
@@ -72,6 +72,18 @@ soc:
target: ''
links:
- '/#/hunt?q=(process.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path'
- name: actionProcessChildInfo
description: actionProcessChildInfoHelp
icon: fa-users-line
target: ''
links:
- '/#/hunt?q=(process.entity_id:"{:process.entity_id}" OR process.parent.entity_id:"{:process.entity_id}") | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path'
- name: actionProcessAllInfo
description: actionProcessAllInfoHelp
icon: fa-users-between-lines
target: ''
links:
- '/#/hunt?q=({:process.entity_id}) | groupby event.dataset | groupby -sankey event.dataset event.action | groupby event.action | groupby process.name | groupby process.command_line | groupby host.name user.name | groupby source.ip source.port destination.ip destination.port | groupby dns.question.name | groupby dns.answers.data | groupby file.path | groupby registry.path | groupby dll.path'
- name: actionProcessAncestors
description: actionProcessAncestorsHelp
icon: fa-people-roof
@@ -1314,7 +1326,7 @@ soc:
reposFolder: /opt/sensoroni/sigma/repos
rulesFingerprintFile: /opt/sensoroni/fingerprints/sigma.fingerprint
stateFilePath: /opt/sensoroni/fingerprints/elastalertengine.state
integrityCheckFrequencySeconds: 600
integrityCheckFrequencySeconds: 1200
rulesRepos:
default:
- repo: https://github.com/Security-Onion-Solutions/securityonion-resources
@@ -1393,18 +1405,19 @@ soc:
community: true
yaraRulesFolder: /opt/sensoroni/yara/rules
stateFilePath: /opt/sensoroni/fingerprints/strelkaengine.state
integrityCheckFrequencySeconds: 600
integrityCheckFrequencySeconds: 1200
suricataengine:
allowRegex: ''
autoUpdateEnabled: true
communityRulesImportFrequencySeconds: 86400
communityRulesImportErrorSeconds: 300
customRulesets:
failAfterConsecutiveErrorCount: 10
communityRulesFile: /nsm/rules/suricata/emerging-all.rules
denyRegex: ''
rulesFingerprintFile: /opt/sensoroni/fingerprints/emerging-all.fingerprint
stateFilePath: /opt/sensoroni/fingerprints/suricataengine.state
integrityCheckFrequencySeconds: 600
integrityCheckFrequencySeconds: 1200
client:
enableReverseLookup: false
docsUrl: /docs/
@@ -2250,6 +2263,78 @@ soc:
- suricata
- sigma
- yara
license:
customEnabled: true
labels:
- None
- Apache-2.0
- AGPL-3.0-only
- BSD-3-Clause
- DRL-1.1
- GPL-2.0-only
- GPL-3.0-only
- MIT
severityTranslations:
minor: low
major: high
templateDetections:
suricata: |
# This is a Suricata rule template. Replace all template values with your own values.
# The rule identifier [sid] is pregenerated and known to be unique for this Security Onion installation.
# Docs: https://docs.suricata.io/en/latest/rules/intro.html
# Delete these comments before attempting to "Create" the rule
alert http $EXTERNAL_NET any -> $HOME_NET any (msg:"Example Rule Title - 'example' String Detected"; content:"example"; sid:[publicId]; rev:1;)
strelka: |
/*
This is a YARA rule template. Replace all template values with your own values.
The YARA rule name is the unique identifier for the rule.
Docs: https://yara.readthedocs.io/en/stable/writingrules.html#writing-yara-rules
*/
rule Example // This identifier _must_ be unique
{
meta:
description = "Generic YARA Rule"
author = "@SecurityOnion"
date = "YYYY-MM-DD"
reference = "https://local.invalid"
strings:
$my_text_string = "text here"
$my_hex_string = { E2 34 A1 C8 23 FB }
condition:
filesize < 3MB and ($my_text_string or $my_hex_string)
}
elastalert: |
# This is a Sigma rule template, which uses YAML. Replace all template values with your own values.
# The id (UUIDv4) is pregenerated and can safely be used.
# Click "Convert" to convert the Sigma rule to use Security Onion field mappings within an EQL query
#
# Rule Creation Guide: https://github.com/SigmaHQ/sigma/wiki/Rule-Creation-Guide
# Logsources: https://sigmahq.io/docs/basics/log-sources.html
title: 'A Short Capitalized Title With Less Than 50 Characters'
id: [publicId]
status: 'experimental'
description: |
This should be a detailed description of what this Detection focuses on: what we are trying to find and why we are trying to find it.
For example, from rule 97a80ec7-0e2f-4d05-9ef4-65760e634f6b: "Detects a whoami.exe executed with the /priv command line flag instructing the tool to show all current user privileges. This is often used after a privilege escalation attempt."
references:
- 'https://local.invalid'
author: '@SecurityOnion'
date: 'YYYY/MM/DD'
tags:
- detection.threat_hunting
- attack.technique_id
logsource:
category: process_creation
product: windows
detection:
selection_img:
- Image|endswith: '\whoami.exe'
- OriginalFileName: 'whoami.exe'
selection_cli:
CommandLine|contains|windash:
- ' -priv'
condition: all of selection_*
level: 'high' # info | low | medium | high | critical
+1
View File
@@ -27,6 +27,7 @@ so-soc:
- /opt/so/conf/strelka:/opt/sensoroni/yara:rw
- /opt/so/conf/sigma:/opt/sensoroni/sigma:rw
- /opt/so/rules/elastalert/rules:/opt/sensoroni/elastalert:rw
- /opt/so/rules/nids/suri:/opt/sensoroni/nids:ro
- /opt/so/conf/soc/fingerprints:/opt/sensoroni/fingerprints:rw
- /nsm/soc/jobs:/opt/sensoroni/jobs:rw
- /nsm/soc/uploads:/nsm/soc/uploads:rw
+14 -2
View File
@@ -1,8 +1,20 @@
## Getting Started
New to Security Onion 2? Click the menu in the upper-right corner and you'll find links for [Help](/docs/) and a [Cheat Sheet](/docs/cheatsheet.pdf) that will help you best utilize Security Onion to hunt for evil! In addition, check out our free Security Onion 2 Essentials online course, available on our [Training](https://securityonionsolutions.com/training) website.
New to Security Onion? Click the menu in the upper-right corner and you'll find links for [Help](/docs/) and a [Cheat Sheet](/docs/cheatsheet.pdf) that will help you best utilize Security Onion to hunt for evil! In addition, check out our free Security Onion Essentials online course, available on our [Training](https://securityonionsolutions.com/training) website.
If you're ready to dive in, take a look at the [Alerts](/#/alerts) interface to see what Security Onion has detected so far. Then go to the [Dashboards](/#/dashboards) interface for a general overview of all logs collected or go to the [Hunt](/#/hunt) interface for more focused threat hunting. Once you've found something of interest, escalate it to [Cases](/#/cases) to then collect evidence and analyze observables as you work towards closing the case.
If you're ready to dive in, take a look at the [Alerts](/#/alerts) interface to see what Security Onion has detected so far. If you find any false positives, then you can tune those in [Detections](/#/detections).
Next, go to the [Dashboards](/#/dashboards) interface for a general overview of all logs collected. Here are a few overview dashboards to get you started:
[Overview Dashboard](/#/dashboards) | [Elastic Agent Overview](/#/dashboards?q=event.module%3Aendpoint%20%7C%20groupby%20event.dataset%20%7C%20groupby%20host.name%20%7C%20groupby%20-sankey%20host.name%20user.name%20%7C%20groupby%20user.name%20%7C%20groupby%20-sankey%20user.name%20process.name%20%7C%20groupby%20process.name) | [Network Connection Overview](/#/dashboards?q=tags%3Aconn%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20-sankey%20destination.port%20network.protocol%20%7C%20groupby%20network.protocol%20%7C%20groupby%20network.transport%20%7C%20groupby%20connection.history%20%7C%20groupby%20connection.state%20%7C%20groupby%20connection.state_description%20%7C%20groupby%20source.geo.country_name%20%7C%20groupby%20destination.geo.country_name%20%7C%20groupby%20client.ip_bytes%20%7C%20groupby%20server.ip_bytes%20%7C%20groupby%20client.oui) | [DNS](/#/dashboards?q=tags%3Adns%20%7C%20groupby%20dns.query.name%20%7C%20groupby%20source.ip%20%7C%20groupby%20-sankey%20source.ip%20destination.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20dns.highest_registered_domain%20%7C%20groupby%20dns.parent_domain%20%7C%20groupby%20dns.query.type_name%20%7C%20groupby%20dns.response.code_name%20%7C%20groupby%20dns.answers.name%20%7C%20groupby%20destination_geo.organization_name) | [Files](/#/dashboards?q=tags%3Afile%20%7C%20groupby%20file.mime_type%20%7C%20groupby%20-sankey%20file.mime_type%20file.source%20%7C%20groupby%20file.source%20%7C%20groupby%20file.bytes.total%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination_geo.organization_name) | 
[HTTP](/#/dashboards?q=tags%3Ahttp%20%7C%20groupby%20http.method%20%7C%20groupby%20-sankey%20http.method%20http.virtual_host%20%7C%20groupby%20http.virtual_host%20%7C%20groupby%20http.uri%20%7C%20groupby%20http.useragent%20%7C%20groupby%20http.status_code%20%7C%20groupby%20http.status_message%20%7C%20groupby%20file.resp_mime_types%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20destination_geo.organization_name) | [SSL](/#/dashboards?q=tags%3Assl%20%7C%20groupby%20ssl.version%20%7C%20groupby%20-sankey%20ssl.version%20ssl.server_name%20%7C%20groupby%20ssl.server_name%20%7C%20groupby%20source.ip%20%7C%20groupby%20destination.ip%20%7C%20groupby%20destination.port%20%7C%20groupby%20destination_geo.organization_name)
Click the drop-down menu in Dashboards to find many more dashboards. You might also want to explore the [Hunt](/#/hunt) interface for more focused threat hunting.
Once you've found something of interest, escalate it to [Cases](/#/cases) to then collect evidence and analyze observables as you work towards closing the case.
If you want to check the health of your deployment, check out the [Grid](/#/grid) interface.
For more coverage of your enterprise, you can deploy the Elastic Agent to endpoints by going to the [Downloads](/#/downloads) page.
## What's New
+6 -1
View File
@@ -1,16 +1,21 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
https://securityonion.net/license; you may not use this file except in compliance with the
Elastic License 2.0. #}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'soc/defaults.map.jinja' import SOCDEFAULTS with context %}
{% from 'logstash/map.jinja' import LOGSTASH_NODES %}
{% from 'manager/map.jinja' import MANAGERMERGED %}
{% set DOCKER_EXTRA_HOSTS = LOGSTASH_NODES %}
{% do DOCKER_EXTRA_HOSTS.append({GLOBALS.influxdb_host:pillar.node_data[GLOBALS.influxdb_host].ip}) %}
{% set SOCMERGED = salt['pillar.get']('soc', SOCDEFAULTS, merge=true) %}
{% do SOCMERGED.config.server.update({'proxy': MANAGERMERGED.proxy}) %}
{% do SOCMERGED.config.server.update({'additionalCA': MANAGERMERGED.additionalCA}) %}
{% do SOCMERGED.config.server.update({'insecureSkipVerify': MANAGERMERGED.insecureSkipVerify}) %}
{# if SOCMERGED.config.server.modules.cases == httpcase details come from the soc pillar #}
{% if SOCMERGED.config.server.modules.cases != 'soc' %}
{% do SOCMERGED.config.server.modules.elastic.update({'casesEnabled': false}) %}
+18 -1
View File
@@ -119,7 +119,7 @@ soc:
advanced: True
rulesRepos:
default: &eerulesRepos
description: "Custom Git repositories to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Elastalert --> Full Update."
description: "Custom Git repositories to pull Sigma rules from. 'license' field is required, 'folder' is optional. 'community' disables some management options for the imported rules - they can't be deleted or edited, just tuned, duplicated and Enabled | Disabled. The new settings will be applied within 15 minutes. At that point, you will need to wait for the scheduled rule update to take place (by default, every 24 hours), or you can force the update by navigating to Detections --> Options dropdown menu --> Elastalert --> Full Update."
global: True
advanced: True
forcedType: "[]{}"
@@ -247,6 +247,12 @@ soc:
description: 'How often the Suricata integrity checker runs (in seconds). This verifies the integrity of deployed rules.'
global: True
advanced: True
customRulesets:
description: 'URLs and/or Local File configurations for Suricata custom rulesets. Refer to the linked documentation for important specification and file placement information'
global: True
advanced: True
forcedType: "[]{}"
helpLink: suricata.html
client:
enableReverseLookup:
description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI.
@@ -319,6 +325,17 @@ soc:
cases: *appSettings
dashboards: *appSettings
detections: *appSettings
detection:
templateDetections:
suricata:
description: The template used when creating a new Suricata detection. [publicId] will be replaced with an unused Public Id.
multiline: True
strelka:
description: The template used when creating a new Strelka detection.
multiline: True
elastalert:
description: The template used when creating a new ElastAlert detection. [publicId] will be replaced with an unused Public Id.
multiline: True
grid:
maxUploadSize:
description: The maximum number of bytes for an uploaded PCAP import file.
+234 -1
View File
@@ -17,6 +17,8 @@
{% set COMMONNAME = GLOBALS.manager %}
{% endif %}
{% set kafka_password = salt['pillar.get']('kafka:password') %}
{% if grains.id.split('_')|last in ['manager', 'managersearch', 'eval', 'standalone', 'import'] %}
include:
- ca
@@ -661,9 +663,240 @@ elastickeyperms:
- name: /etc/pki/elasticsearch.key
- mode: 640
- group: 930
{%- endif %}
{% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone'] %}
elasticfleet_kafka_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-kafka.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-kafka.key') -%}
- prereq:
- x509: elasticfleet_kafka_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
elasticfleet_kafka_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-kafka.crt
- ca_server: {{ ca_server }}
- signing_policy: kafka
- private_key: /etc/pki/elasticfleet-kafka.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-kafka.key -topk8 -out /etc/pki/elasticfleet-kafka.p8 -nocrypt"
- onchanges:
- x509: elasticfleet_kafka_key
elasticfleet_kafka_cert_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.crt
- mode: 640
- user: 960
- group: 939
elasticfleet_kafka_key_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.key
- mode: 640
- user: 960
- group: 939
elasticfleet_kafka_pkcs8_perms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-kafka.p8
- mode: 640
- user: 960
- group: 939
kafka_client_key:
x509.private_key_managed:
- name: /etc/pki/kafka-client.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/kafka-client.key') -%}
- prereq:
- x509: /etc/pki/kafka-client.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
kafka_client_crt:
x509.certificate_managed:
- name: /etc/pki/kafka-client.crt
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka
- private_key: /etc/pki/kafka-client.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
kafka_client_key_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka-client.key
- mode: 640
- user: 960
- group: 939
kafka_client_crt_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka-client.crt
- mode: 640
- user: 960
- group: 939
{% endif %}
{% if grains['role'] in ['so-manager', 'so-managersearch','so-receiver', 'so-standalone'] %}
kafka_key:
x509.private_key_managed:
- name: /etc/pki/kafka.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/kafka.key') -%}
- prereq:
- x509: /etc/pki/kafka.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
kafka_crt:
x509.certificate_managed:
- name: /etc/pki/kafka.crt
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka
- private_key: /etc/pki/kafka.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka.key -in /etc/pki/kafka.crt -export -out /etc/pki/kafka.p12 -nodes -passout pass:{{ kafka_password }}"
- onchanges:
- x509: /etc/pki/kafka.key
kafka_key_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka.key
- mode: 640
- user: 960
- group: 939
kafka_crt_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka.crt
- mode: 640
- user: 960
- group: 939
kafka_pkcs12_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka.p12
- mode: 640
- user: 960
- group: 939
{% endif %}
# Standalone needs kafka-logstash for automated testing. Searchnode/manager search need it for logstash to consume from Kafka.
# Manager will have cert, but be unused until a pipeline is created and logstash enabled.
{% if grains['role'] in ['so-standalone', 'so-managersearch', 'so-searchnode', 'so-manager'] %}
kafka_logstash_key:
x509.private_key_managed:
- name: /etc/pki/kafka-logstash.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/kafka-logstash.key') -%}
- prereq:
- x509: /etc/pki/kafka-logstash.crt
{%- endif %}
- retry:
attempts: 5
interval: 30
kafka_logstash_crt:
x509.certificate_managed:
- name: /etc/pki/kafka-logstash.crt
- ca_server: {{ ca_server }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- signing_policy: kafka
- private_key: /etc/pki/kafka-logstash.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs12 -inkey /etc/pki/kafka-logstash.key -in /etc/pki/kafka-logstash.crt -export -out /etc/pki/kafka-logstash.p12 -nodes -passout pass:{{ kafka_password }}"
- onchanges:
- x509: /etc/pki/kafka-logstash.key
kafka_logstash_key_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka-logstash.key
- mode: 640
- user: 960
- group: 939
kafka_logstash_crt_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka-logstash.crt
- mode: 640
- user: 960
- group: 939
kafka_logstash_pkcs12_perms:
file.managed:
- replace: False
- name: /etc/pki/kafka-logstash.p12
- mode: 640
- user: 960
- group: 931
{% endif %}
{% else %}
{{sls}}_state_not_allowed:
+17
View File
@@ -71,3 +71,20 @@ fleet_crt:
fbcertdir:
file.absent:
- name: /opt/so/conf/filebeat/etc/pki
kafka_crt:
file.absent:
- name: /etc/pki/kafka.crt
kafka_key:
file.absent:
- name: /etc/pki/kafka.key
kafka_logstash_crt:
file.absent:
- name: /etc/pki/kafka-logstash.crt
kafka_logstash_key:
file.absent:
- name: /etc/pki/kafka-logstash.key
kafka_logstash_keystore:
file.absent:
- name: /etc/pki/kafka-logstash.p12
+2 -2
View File
@@ -951,7 +951,7 @@ the release. Additionally, the original security profile has been modified by Se
<ns5:select idref="xccdf_org.ssgproject.content_rule_no_empty_passwords_etc_shadow" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_no_files_unowned_by_user" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_no_host_based_files" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_no_shelllogin_for_systemaccounts" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_no_shelllogin_for_systemaccounts" selected="false" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_no_tmux_in_shells" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_no_user_host_based_files" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_MFEhiplsm_installed" selected="true" />
@@ -962,7 +962,7 @@ the release. Additionally, the original security profile has been modified by Se
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_crypto-policies_installed" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_fapolicyd_installed" selected="false" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_firewalld_installed" selected="false" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_gnutls-utils_installed" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_gnutls-utils_installed" selected="false" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_gssproxy_removed" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_iprutils_removed" selected="true" />
<ns5:select idref="xccdf_org.ssgproject.content_rule_package_libreswan_installed" selected="true" />
+5 -2
View File
@@ -150,13 +150,16 @@ suricata:
helpLink: suricata.html
vars:
address-groups:
HOME_NET: &suriaddressgroup
HOME_NET:
description: Assign a list of hosts, or networks, using CIDR notation, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable.
regex: ^(((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)(\/([0-9]|[1-2][0-9]|3[0-2]))?$|^((([0-9A-Fa-f]{1,4}:){7}([0-9A-Fa-f]{1,4}|:))|(([0-9A-Fa-f]{1,4}:){6}(:[0-9A-Fa-f]{1,4}|((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?))|:))|(([0-9A-Fa-f]{1,4}:){5}((:[0-9A-Fa-f]{1,4}){1,2}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){4}((:[0-9A-Fa-f]{1,4}){1,3}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){3}((:[0-9A-Fa-f]{1,4}){1,4}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){2}((:[0-9A-Fa-f]{1,4}){1,5}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(([0-9A-Fa-f]{1,4}:){1}((:[0-9A-Fa-f]{1,4}){1,6}|:((25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)\.){3}(25[0-5]|(2[0-4]|1[0-9])[0-9]|0?[0-9][0-9]?)|:))|(:((:[0-9A-Fa-f]{1,4}){1,7}|:)))(\/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8]))?$
regexFailureMessage: You must enter a valid IP address or CIDR.
helpLink: suricata.html
duplicates: True
EXTERNAL_NET: *suriaddressgroup
EXTERNAL_NET: &suriaddressgroup
description: Assign a list of hosts, or networks, or other customization, to this Suricata variable. The variable can then be re-used within Suricata rules. This allows for a single adjustment to the variable that will then affect all rules referencing the variable.
helpLink: suricata.html
duplicates: True
HTTP_SERVERS: *suriaddressgroup
SMTP_SERVERS: *suriaddressgroup
SQL_SERVERS: *suriaddressgroup
+34
View File
@@ -243,6 +243,40 @@
password = "{{ salt['pillar.get']('elasticsearch:auth:users:so_logstash_user:pass') }}"
{%- endif %}
{% if grains.role in ['so-manager','so-managersearch','so-standalone','so-receiver'] and GLOBALS.pipeline == "KAFKA" -%}
[[inputs.jolokia2_agent]]
name_prefix= "kafka_"
urls = ["http://localhost:8778/jolokia"]
[[inputs.jolokia2_agent.metric]]
name = "topics"
mbean = "kafka.server:name=*,type=BrokerTopicMetrics"
field_prefix = "$1."
[[inputs.jolokia2_agent.metric]]
name = "topic"
mbean = "kafka.server:name=*,topic=*,type=BrokerTopicMetrics"
field_prefix = "$1."
tag_keys = ["topic"]
[[inputs.jolokia2_agent.metric]]
name = "controller"
mbean = "kafka.controller:name=*,type=*"
field_prefix = "$1."
[[inputs.jolokia2_agent.metric]]
name = "partition"
mbean = "kafka.log:name=*,partition=*,topic=*,type=Log"
field_name = "$1"
tag_keys = ["topic", "partition"]
[[inputs.jolokia2_agent.metric]]
name = "partition"
mbean = "kafka.cluster:name=UnderReplicated,partition=*,topic=*,type=Partition"
field_name = "UnderReplicatedPartitions"
tag_keys = ["topic", "partition"]
{%- endif %}
# # Read metrics from one or more commands that can output to stdout
{%- if 'sostatus.sh' in TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]] %}
{%- do TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('sostatus.sh') %}
+7
View File
@@ -22,3 +22,10 @@
{% endif %}
{% endif %}
{% if GLOBALS.pipeline != 'REDIS' %}
{# When global pipeline is not REDIS remove redis.sh script. KAFKA metrics are collected via jolokia agent. Config in telegraf.conf #}
{% if GLOBALS.role in ['so-standalone', 'so-manager', 'so-managersearch', 'so-receiver', 'so-heavynode'] %}
{% do TELEGRAFMERGED.scripts[GLOBALS.role.split('-')[1]].remove('redis.sh') %}
{% endif %}
{% endif %}
+4
View File
@@ -107,6 +107,7 @@ base:
- utility
- elasticfleet
- stig
- kafka
'*_standalone and G@saltversion:{{saltversion}}':
- match: compound
@@ -141,6 +142,7 @@ base:
- utility
- elasticfleet
- stig
- kafka
'*_searchnode and G@saltversion:{{saltversion}}':
- match: compound
@@ -238,6 +240,8 @@ base:
- logstash
- redis
- elasticfleet.install_agent_grid
- kafka
- stig
'*_idh and G@saltversion:{{saltversion}}':
- match: compound
+1 -1
View File
@@ -23,7 +23,7 @@
'manager_ip': INIT.PILLAR.global.managerip,
'md_engine': INIT.PILLAR.global.mdengine,
'pcap_engine': GLOBALMERGED.pcapengine,
'pipeline': INIT.PILLAR.global.pipeline,
'pipeline': GLOBALMERGED.pipeline,
'so_version': INIT.PILLAR.global.soversion,
'so_docker_gateway': DOCKER.gateway,
'so_docker_range': DOCKER.range,
+14 -2
View File
@@ -788,6 +788,7 @@ create_manager_pillars() {
patch_pillar
nginx_pillar
kibana_pillar
kafka_pillar
}
create_repo() {
@@ -1176,6 +1177,18 @@ kibana_pillar() {
logCmd "touch $kibana_pillar_file"
}
kafka_pillar() {
KAFKACLUSTERID=$(get_random_value 22)
KAFKAPASS=$(get_random_value)
logCmd "mkdir -p $local_salt_dir/pillar/kafka"
logCmd "touch $adv_kafka_pillar_file"
logCmd "touch $kafka_pillar_file"
printf '%s\n'\
"kafka:"\
" cluster_id: $KAFKACLUSTERID"\
" password: $KAFKAPASS" > $kafka_pillar_file
}
logrotate_pillar() {
logCmd "mkdir -p $local_salt_dir/pillar/logrotate"
logCmd "touch $adv_logrotate_pillar_file"
@@ -1332,7 +1345,6 @@ create_global() {
# Continue adding other details
echo " imagerepo: '$IMAGEREPO'" >> $global_pillar_file
echo " pipeline: 'redis'" >> $global_pillar_file
echo " repo_host: '$HOSTNAME'" >> $global_pillar_file
echo " influxdb_host: '$HOSTNAME'" >> $global_pillar_file
echo " registry_host: '$HOSTNAME'" >> $global_pillar_file
@@ -1402,7 +1414,7 @@ make_some_dirs() {
mkdir -p $local_salt_dir/salt/firewall/portgroups
mkdir -p $local_salt_dir/salt/firewall/ports
for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global;do
for THEDIR in bpf pcap elasticsearch ntp firewall redis backup influxdb strelka sensoroni soc docker zeek suricata nginx telegraf logstash soc manager kratos idstools idh elastalert stig global kafka;do
mkdir -p $local_salt_dir/pillar/$THEDIR
touch $local_salt_dir/pillar/$THEDIR/adv_$THEDIR.sls
touch $local_salt_dir/pillar/$THEDIR/soc_$THEDIR.sls
+6
View File
@@ -178,6 +178,12 @@ export redis_pillar_file
adv_redis_pillar_file="$local_salt_dir/pillar/redis/adv_redis.sls"
export adv_redis_pillar_file
kafka_pillar_file="$local_salt_dir/pillar/kafka/soc_kafka.sls"
export kafka_pillar_file
adv_kafka_pillar_file="$local_salt_dir/pillar/kafka/adv_kafka.sls"
export kafka_pillar_file
idh_pillar_file="$local_salt_dir/pillar/idh/soc_idh.sls"
export idh_pillar_file
Binary file not shown.