Mirror of https://github.com/Security-Onion-Solutions/securityonion.git, synced 2026-05-09 12:52:38 +02:00.
Compare commits
208 Commits
Listed by abbreviated SHA1:

82e55ae87f, 3e02001544, 8ca59e6f0c, 82dac82d15, 288a823edf, f9e3d30a71, 9cec79b299, c86399327b,
fa8162de02, 33abc429d1, b22585ca90, 9f2ca7012f, 21aeb68188, 81e60ec5bf, 199c2746f1, 8eca465ef6,
a45e59239f, 2ad0bcab7c, 070d150420, 90ecbe90d8, 813fa03dc3, 02381fbbe9, 0722b681b1, 564815e836,
88b30adf7f, b6acf3b522, ba55468da8, cdd217283d, 810a582717, a6948e8dcb, 5f35554fdc, 0ecc7ae594,
fdfca469cc, 5f2ec76ba8, b015c8ff14, 7e70870a9e, eadad6c163, 22b32a16dd, 22f869734e, 398bc9e4ed,
72dbb69a1c, 339959d1c0, d5c0ec4404, e616b4c120, f240a99e22, 614f32c5e0, cd6707a566, edd207a9d5,
724d76965f, dbf4fb66a4, 5f28e9b191, 01bd3b6e06, 1abfd77351, 06a555fafb, 81c0f2b464, d5dc28e526,
7411031e11, 247091766c, 7f93110d68, 05f6503d61, a149ea7e8f, bb71e44614, 84197fb33b, 89a6e7c0dd,
a902f667ba, f72c30abd0, 37e9257698, 72105f1f2f, ee89b78751, 33ef138866, 71da27dc8e, 80bf07ffd8,
b69e50542a, 3ecd19d085, b6a3d1889c, 1cb34b089c, 1537ba5031, 8225d41661, ee437265fc, 3f46caaf02,
f3181b204a, dd39db4584, 759880a800, f5cd90d139, 31383bd9d0, ebb93b4fa7, 21076af01e, f11e9da83a,
0fddcd8fe7, 927eba566c, af9330a9dd, b3fbd5c7a4, 5228668be0, 7d07f3c8fe, d9a9029ce5, 9fe53d9ccc,
f7b80f5931, f11d315fea, 2013bf9e30, a2ffb92b8d, 8b6d11b118, ba00ae8a7b, 470b3bd4da, c124186989,
d24808ff98, 7d22f7bd58, 88582c94e8, cefbe01333, 76a6997de2, 16a4a42faf, 0e4623c728, d598e20fbb,
8b0d4b2195, cf414423b1, 0405a66c72, da7c2995b0, 696a1a729c, 5fa7006f11, 5634aed679, a232cd89cc,
dd40e44530, 47d226e189, 440537140b, 29e13b2c0b, 2006a07637, abcad9fde0, a43947cca5, f51de6569f,
b0584a4dc5, 08f34d408f, 6298397534, 9ccd0acb4f, 1ffdcab3be, da1045e052, 55be1f1119, 9272afa9e5,
378d1ec81b, c1b1452bd9, cdbacdcd7e, 6b8a6267da, 89e49d0bf3, 2dfa83dd7d, f0b67a415a, b87af8ea3d,
46e38d39bb, 81afbd32d4, e9c4f40735, 61bdfb1a4b, 9ec4a26f97, 358a2e6d3f, 762e73faf5, ef3cfc8722,
28d31f4840, 2166bb749a, 868cd11874, 7356f3affd, dd56e7f1ac, 075b592471, 51a3c04c3d, 1a8aae3039,
8101bc4941, 88de246ce3, 3643b57167, 5b3ca98b80, 51e0ca2602, 664f3fd18a, 76f4ccf8c8, 2a37ad82b2,
80540da52f, e4ba3d6a2a, 3dec6986b6, bbfb58ea4e, c91deb97b1, dc2598d5cf, ff45e5ebc6, 1e2b51eae6,
58d332ea94, dcc67b9b8f, cd886dd0f9, 37a6e28a6c, 434a2e7866, 79707db6ee, 0707507412, c7e865aa1c,
a89db79854, 812f65eee8, cfa530ba9c, 922c008b11, ea30749512, 0a55592d7e, 115ca2c41d, 9e53bd3f2d,
d4f1078f84, 1f9bf45b66, 271de757e7, d4ac352b5a, afcef1d0e7, 91b164b728, 6a4501241d, c6978f9037,
7300513636, fb7b73c601, f2b6d59c65, 67162357a3, 8ea97e4af3, 2f9a2e15b3, a4fcf4ddf2, cd0d88e2c0
@@ -1,546 +0,0 @@
-title = "gitleaks config"
-
-# Gitleaks rules are defined by regular expressions and entropy ranges.
-# Some secrets have unique signatures which make detecting those secrets easy.
-# Examples of those secrets would be GitLab Personal Access Tokens, AWS keys, and GitHub Access Tokens.
-# All these examples have defined prefixes like `glpat`, `AKIA`, `ghp_`, etc.
-#
-# Other secrets might just be a hash which means we need to write more complex rules to verify
-# that what we are matching is a secret.
-#
-# Here is an example of a semi-generic secret
-#
-#   discord_client_secret = "8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ"
-#
-# We can write a regular expression to capture the variable name (identifier),
-# the assignment symbol (like '=' or ':='), and finally the actual secret.
-# The structure of a rule to match this example secret is below:
-#
-#                                                   Beginning string quotation
-#                                                   │       End string quotation
-#                                                   │       │
-#                                                   ▼       ▼
-#    (?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_\-]{32})['\"]
-#
-#         ▲                           ▲                         ▲
-#         │                           │                         │
-#         │                           │                         │
-#     identifier               assignment symbol              secret
-#
-[[rules]]
-id = "gitlab-pat"
-description = "GitLab Personal Access Token"
-regex = '''glpat-[0-9a-zA-Z\-\_]{20}'''
-
-[[rules]]
-id = "aws-access-token"
-description = "AWS"
-regex = '''(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}'''
-
-# Cryptographic keys
-[[rules]]
-id = "PKCS8-PK"
-description = "PKCS8 private key"
-regex = '''-----BEGIN PRIVATE KEY-----'''
-
-[[rules]]
-id = "RSA-PK"
-description = "RSA private key"
-regex = '''-----BEGIN RSA PRIVATE KEY-----'''
-
-[[rules]]
-id = "OPENSSH-PK"
-description = "SSH private key"
-regex = '''-----BEGIN OPENSSH PRIVATE KEY-----'''
-
-[[rules]]
-id = "PGP-PK"
-description = "PGP private key"
-regex = '''-----BEGIN PGP PRIVATE KEY BLOCK-----'''
-
-[[rules]]
-id = "github-pat"
-description = "GitHub Personal Access Token"
-regex = '''ghp_[0-9a-zA-Z]{36}'''
-
-[[rules]]
-id = "github-oauth"
-description = "GitHub OAuth Access Token"
-regex = '''gho_[0-9a-zA-Z]{36}'''
-
-[[rules]]
-id = "SSH-DSA-PK"
-description = "SSH (DSA) private key"
-regex = '''-----BEGIN DSA PRIVATE KEY-----'''
-
-[[rules]]
-id = "SSH-EC-PK"
-description = "SSH (EC) private key"
-regex = '''-----BEGIN EC PRIVATE KEY-----'''
-
-
-[[rules]]
-id = "github-app-token"
-description = "GitHub App Token"
-regex = '''(ghu|ghs)_[0-9a-zA-Z]{36}'''
-
-[[rules]]
-id = "github-refresh-token"
-description = "GitHub Refresh Token"
-regex = '''ghr_[0-9a-zA-Z]{76}'''
-
-[[rules]]
-id = "shopify-shared-secret"
-description = "Shopify shared secret"
-regex = '''shpss_[a-fA-F0-9]{32}'''
-
-[[rules]]
-id = "shopify-access-token"
-description = "Shopify access token"
-regex = '''shpat_[a-fA-F0-9]{32}'''
-
-[[rules]]
-id = "shopify-custom-access-token"
-description = "Shopify custom app access token"
-regex = '''shpca_[a-fA-F0-9]{32}'''
-
-[[rules]]
-id = "shopify-private-app-access-token"
-description = "Shopify private app access token"
-regex = '''shppa_[a-fA-F0-9]{32}'''
-
-[[rules]]
-id = "slack-access-token"
-description = "Slack token"
-regex = '''xox[baprs]-([0-9a-zA-Z]{10,48})?'''
-
-[[rules]]
-id = "stripe-access-token"
-description = "Stripe"
-regex = '''(?i)(sk|pk)_(test|live)_[0-9a-z]{10,32}'''
-
-[[rules]]
-id = "pypi-upload-token"
-description = "PyPI upload token"
-regex = '''pypi-AgEIcHlwaS5vcmc[A-Za-z0-9\-_]{50,1000}'''
-
-[[rules]]
-id = "gcp-service-account"
-description = "Google (GCP) Service-account"
-regex = '''\"type\": \"service_account\"'''
-
-[[rules]]
-id = "heroku-api-key"
-description = "Heroku API Key"
-regex = ''' (?i)(heroku[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "slack-web-hook"
-description = "Slack Webhook"
-regex = '''https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8,12}/[a-zA-Z0-9_]{24}'''
-
-[[rules]]
-id = "twilio-api-key"
-description = "Twilio API Key"
-regex = '''SK[0-9a-fA-F]{32}'''
-
-[[rules]]
-id = "age-secret-key"
-description = "Age secret key"
-regex = '''AGE-SECRET-KEY-1[QPZRY9X8GF2TVDW0S3JN54KHCE6MUA7L]{58}'''
-
-[[rules]]
-id = "facebook-token"
-description = "Facebook token"
-regex = '''(?i)(facebook[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "twitter-token"
-description = "Twitter token"
-regex = '''(?i)(twitter[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{35,44})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "adobe-client-id"
-description = "Adobe Client ID (Oauth Web)"
-regex = '''(?i)(adobe[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "adobe-client-secret"
-description = "Adobe Client Secret"
-regex = '''(p8e-)(?i)[a-z0-9]{32}'''
-
-[[rules]]
-id = "alibaba-access-key-id"
-description = "Alibaba AccessKey ID"
-regex = '''(LTAI)(?i)[a-z0-9]{20}'''
-
-[[rules]]
-id = "alibaba-secret-key"
-description = "Alibaba Secret Key"
-regex = '''(?i)(alibaba[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{30})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "asana-client-id"
-description = "Asana Client ID"
-regex = '''(?i)(asana[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9]{16})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "asana-client-secret"
-description = "Asana Client Secret"
-regex = '''(?i)(asana[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "atlassian-api-token"
-description = "Atlassian API token"
-regex = '''(?i)(atlassian[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{24})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "bitbucket-client-id"
-description = "Bitbucket client ID"
-regex = '''(?i)(bitbucket[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "bitbucket-client-secret"
-description = "Bitbucket client secret"
-regex = '''(?i)(bitbucket[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9_\-]{64})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "beamer-api-token"
-description = "Beamer API token"
-regex = '''(?i)(beamer[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](b_[a-z0-9=_\-]{44})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "clojars-api-token"
-description = "Clojars API token"
-regex = '''(CLOJARS_)(?i)[a-z0-9]{60}'''
-
-[[rules]]
-id = "contentful-delivery-api-token"
-description = "Contentful delivery API token"
-regex = '''(?i)(contentful[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{43})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "databricks-api-token"
-description = "Databricks API token"
-regex = '''dapi[a-h0-9]{32}'''
-
-[[rules]]
-id = "discord-api-token"
-description = "Discord API key"
-regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{64})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "discord-client-id"
-description = "Discord client ID"
-regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9]{18})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "discord-client-secret"
-description = "Discord client secret"
-regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_\-]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "doppler-api-token"
-description = "Doppler API token"
-regex = '''['\"](dp\.pt\.)(?i)[a-z0-9]{43}['\"]'''
-
-[[rules]]
-id = "dropbox-api-secret"
-description = "Dropbox API secret/key"
-regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{15})['\"]'''
-
-[[rules]]
-id = "dropbox--api-key"
-description = "Dropbox API secret/key"
-regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{15})['\"]'''
-
-[[rules]]
-id = "dropbox-short-lived-api-token"
-description = "Dropbox short lived API token"
-regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](sl\.[a-z0-9\-=_]{135})['\"]'''
-
-[[rules]]
-id = "dropbox-long-lived-api-token"
-description = "Dropbox long lived API token"
-regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"][a-z0-9]{11}(AAAAAAAAAA)[a-z0-9\-_=]{43}['\"]'''
-
-[[rules]]
-id = "duffel-api-token"
-description = "Duffel API token"
-regex = '''['\"]duffel_(test|live)_(?i)[a-z0-9_-]{43}['\"]'''
-
-[[rules]]
-id = "dynatrace-api-token"
-description = "Dynatrace API token"
-regex = '''['\"]dt0c01\.(?i)[a-z0-9]{24}\.[a-z0-9]{64}['\"]'''
-
-[[rules]]
-id = "easypost-api-token"
-description = "EasyPost API token"
-regex = '''['\"]EZAK(?i)[a-z0-9]{54}['\"]'''
-
-[[rules]]
-id = "easypost-test-api-token"
-description = "EasyPost test API token"
-regex = '''['\"]EZTK(?i)[a-z0-9]{54}['\"]'''
-
-[[rules]]
-id = "fastly-api-token"
-description = "Fastly API token"
-regex = '''(?i)(fastly[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "finicity-client-secret"
-description = "Finicity client secret"
-regex = '''(?i)(finicity[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{20})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "finicity-api-token"
-description = "Finicity API token"
-regex = '''(?i)(finicity[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "flutterwave-public-key"
-description = "Flutterwave public key"
-regex = '''FLWPUBK_TEST-(?i)[a-h0-9]{32}-X'''
-
-[[rules]]
-id = "flutterwave-secret-key"
-description = "Flutterwave secret key"
-regex = '''FLWSECK_TEST-(?i)[a-h0-9]{32}-X'''
-
-[[rules]]
-id = "flutterwave-enc-key"
-description = "Flutterwave encrypted key"
-regex = '''FLWSECK_TEST[a-h0-9]{12}'''
-
-[[rules]]
-id = "frameio-api-token"
-description = "Frame.io API token"
-regex = '''fio-u-(?i)[a-z0-9\-_=]{64}'''
-
-[[rules]]
-id = "gocardless-api-token"
-description = "GoCardless API token"
-regex = '''['\"]live_(?i)[a-z0-9\-_=]{40}['\"]'''
-
-[[rules]]
-id = "grafana-api-token"
-description = "Grafana API token"
-regex = '''['\"]eyJrIjoi(?i)[a-z0-9\-_=]{72,92}['\"]'''
-
-[[rules]]
-id = "hashicorp-tf-api-token"
-description = "HashiCorp Terraform user/org API token"
-regex = '''['\"](?i)[a-z0-9]{14}\.atlasv1\.[a-z0-9\-_=]{60,70}['\"]'''
-
-[[rules]]
-id = "hubspot-api-token"
-description = "HubSpot API token"
-regex = '''(?i)(hubspot[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "intercom-api-token"
-description = "Intercom API token"
-regex = '''(?i)(intercom[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_]{60})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "intercom-client-secret"
-description = "Intercom client secret/ID"
-regex = '''(?i)(intercom[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "ionic-api-token"
-description = "Ionic API token"
-regex = '''(?i)(ionic[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](ion_[a-z0-9]{42})['\"]'''
-
-[[rules]]
-id = "linear-api-token"
-description = "Linear API token"
-regex = '''lin_api_(?i)[a-z0-9]{40}'''
-
-[[rules]]
-id = "linear-client-secret"
-description = "Linear client secret/ID"
-regex = '''(?i)(linear[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "lob-api-key"
-description = "Lob API Key"
-regex = '''(?i)(lob[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]((live|test)_[a-f0-9]{35})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "lob-pub-api-key"
-description = "Lob Publishable API Key"
-regex = '''(?i)(lob[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]((test|live)_pub_[a-f0-9]{31})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "mailchimp-api-key"
-description = "Mailchimp API key"
-regex = '''(?i)(mailchimp[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32}-us20)['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "mailgun-private-api-token"
-description = "Mailgun private API token"
-regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](key-[a-f0-9]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "mailgun-pub-key"
-description = "Mailgun public validation key"
-regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](pubkey-[a-f0-9]{32})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "mailgun-signing-key"
-description = "Mailgun webhook signing key"
-regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{32}-[a-h0-9]{8}-[a-h0-9]{8})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "mapbox-api-token"
-description = "Mapbox API token"
-regex = '''(?i)(pk\.[a-z0-9]{60}\.[a-z0-9]{22})'''
-
-[[rules]]
-id = "messagebird-api-token"
-description = "MessageBird API token"
-regex = '''(?i)(messagebird[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{25})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "messagebird-client-id"
-description = "MessageBird API client ID"
-regex = '''(?i)(messagebird[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "new-relic-user-api-key"
-description = "New Relic user API Key"
-regex = '''['\"](NRAK-[A-Z0-9]{27})['\"]'''
-
-[[rules]]
-id = "new-relic-user-api-id"
-description = "New Relic user API ID"
-regex = '''(?i)(newrelic[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([A-Z0-9]{64})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "new-relic-browser-api-token"
-description = "New Relic ingest browser API token"
-regex = '''['\"](NRJS-[a-f0-9]{19})['\"]'''
-
-[[rules]]
-id = "npm-access-token"
-description = "npm access token"
-regex = '''['\"](npm_(?i)[a-z0-9]{36})['\"]'''
-
-[[rules]]
-id = "planetscale-password"
-description = "PlanetScale password"
-regex = '''pscale_pw_(?i)[a-z0-9\-_\.]{43}'''
-
-[[rules]]
-id = "planetscale-api-token"
-description = "PlanetScale API token"
-regex = '''pscale_tkn_(?i)[a-z0-9\-_\.]{43}'''
-
-[[rules]]
-id = "postman-api-token"
-description = "Postman API token"
-regex = '''PMAK-(?i)[a-f0-9]{24}\-[a-f0-9]{34}'''
-
-[[rules]]
-id = "pulumi-api-token"
-description = "Pulumi API token"
-regex = '''pul-[a-f0-9]{40}'''
-
-[[rules]]
-id = "rubygems-api-token"
-description = "Rubygem API token"
-regex = '''rubygems_[a-f0-9]{48}'''
-
-[[rules]]
-id = "sendgrid-api-token"
-description = "SendGrid API token"
-regex = '''SG\.(?i)[a-z0-9_\-\.]{66}'''
-
-[[rules]]
-id = "sendinblue-api-token"
-description = "Sendinblue API token"
-regex = '''xkeysib-[a-f0-9]{64}\-(?i)[a-z0-9]{16}'''
-
-[[rules]]
-id = "shippo-api-token"
-description = "Shippo API token"
-regex = '''shippo_(live|test)_[a-f0-9]{40}'''
-
-[[rules]]
-id = "linkedin-client-secret"
-description = "LinkedIn Client secret"
-regex = '''(?i)(linkedin[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z]{16})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "linkedin-client-id"
-description = "LinkedIn Client ID"
-regex = '''(?i)(linkedin[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{14})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "twitch-api-token"
-description = "Twitch API token"
-regex = '''(?i)(twitch[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{30})['\"]'''
-secretGroup = 3
-
-[[rules]]
-id = "typeform-api-token"
-description = "Typeform API token"
-regex = '''(?i)(typeform[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}(tfp_[a-z0-9\-_\.=]{59})'''
-secretGroup = 3
-
-[[rules]]
-id = "generic-api-key"
-description = "Generic API Key"
-regex = '''(?i)((key|api[^Version]|token|secret|password)[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9a-zA-Z\-_=]{8,64})['\"]'''
-entropy = 3.7
-secretGroup = 4
-
-
-[allowlist]
-description = "global allow lists"
-regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''', '''ssl_.*password''', '''integration_key\s=\s"so-logs-"''']
-paths = [
-    '''gitleaks.toml''',
-    '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
-    '''(go.mod|go.sum)$''',
-    '''salt/nginx/files/enterprise-attack.json''',
-    '''(.*?)whl$'''
-]
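For reference, a config like the one removed above is consumed by the gitleaks CLI; a minimal sketch of a local scan, assuming gitleaks v8 and the `.github/.gitleaks.toml` path referenced by the leak-test workflow elsewhere in this compare:

```
# Scan the working tree and git history using the repo's gitleaks config.
gitleaks detect --source . --config .github/.gitleaks.toml --verbose
```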
@@ -10,6 +10,7 @@ body:
       options:
         -
         - 3.0.0
+        - 3.1.0
         - Other (please provide detail below)
     validations:
       required: true
@@ -0,0 +1,22 @@
+## Description
+
+<!--
+Explain the purpose of the pull request. Be brief or detailed depending on the scope of the changes.
+-->
+
+## Related Issues
+
+<!--
+Optionally, list any related issues that this pull request addresses.
+-->
+
+## Checklist
+
+- [ ] I have read and followed the [CONTRIBUTING.md](https://github.com/Security-Onion-Solutions/securityonion/blob/3/main/CONTRIBUTING.md) file.
+- [ ] I have read and agree to the terms of the [Contributor License Agreement](https://securityonionsolutions.com/cla)
+
+## Questions or Comments
+
+<!--
+If you have any questions or comments about this pull request, add them here.
+-->
@@ -1,24 +0,0 @@
-name: contrib
-on:
-  issue_comment:
-    types: [created]
-  pull_request_target:
-    types: [opened,closed,synchronize]
-
-jobs:
-  CLAssistant:
-    runs-on: ubuntu-latest
-    steps:
-      - name: "Contributor Check"
-        if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
-        uses: cla-assistant/github-action@v2.3.1
-        env:
-          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-          PERSONAL_ACCESS_TOKEN : ${{ secrets.PERSONAL_ACCESS_TOKEN }}
-        with:
-          path-to-signatures: 'signatures_v1.json'
-          path-to-document: 'https://securityonionsolutions.com/cla'
-          allowlist: dependabot[bot],jertel,dougburks,TOoSmOotH,defensivedepth,m0duspwnens
-          remote-organization-name: Security-Onion-Solutions
-          remote-repository-name: licensing
-
@@ -1,17 +0,0 @@
-name: leak-test
-
-on: [pull_request]
-
-jobs:
-  build:
-    runs-on: ubuntu-latest
-
-    steps:
-      - uses: actions/checkout@v2
-        with:
-          fetch-depth: '0'
-
-      - name: Gitleaks
-        uses: gitleaks/gitleaks-action@v1.6.0
-        with:
-          config-path: .github/.gitleaks.toml
+1 -1
@@ -23,7 +23,7 @@
 * Link the PR to the related issue, either using [keywords](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) in the PR description, or [manually](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-issues/linking-a-pull-request-to-an-issue#manually-linking-a-pull-request-to-an-issue).
 
-* **Pull requests should be opened against the `dev` branch of this repo**, and should clearly describe the problem and solution.
+* **Pull requests should be opened against the current `?/dev` branch of this repo**, and should clearly describe the problem and solution.
 
 * Be sure you have tested your changes and are confident they will not break other parts of the product.
 
+13 -13
@@ -1,46 +1,46 @@
-### 2.4.210-20260302 ISO image released on 2026/03/02
+### 3.0.0-20260331 ISO image released on 2026/03/31
 
 ### Download and Verify
 
-2.4.210-20260302 ISO image:
-https://download.securityonion.net/file/securityonion/securityonion-2.4.210-20260302.iso
+3.0.0-20260331 ISO image:
+https://download.securityonion.net/file/securityonion/securityonion-3.0.0-20260331.iso
 
-MD5: 575F316981891EBED2EE4E1F42A1F016
-SHA1: 600945E8823221CBC5F1C056084A71355308227E
-SHA256: A6AA6471125F07FA6E2796430E94BEAFDEF728E833E9728FDFA7106351EBC47E
+MD5: ECD318A1662A6FDE0EF213F5A9BD4B07
+SHA1: E55BE314440CCF3392DC0B06BC5E270B43176D9C
+SHA256: 7FC47405E335CBE5C2B6C51FE7AC60248F35CBE504907B8B5A33822B23F8F4D5
 
 Signature for ISO image:
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.210-20260302.iso.sig
+https://github.com/Security-Onion-Solutions/securityonion/raw/3/main/sigs/securityonion-3.0.0-20260331.iso.sig
 
 Signing key:
-https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
+https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/3/main/KEYS
 
 For example, here are the steps you can use on most Linux distributions to download and verify our Security Onion ISO image.
 
 Download and import the signing key:
 ```
-wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS -O - | gpg --import -
+wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/3/main/KEYS -O - | gpg --import -
 ```
 
 Download the signature file for the ISO:
 ```
-wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.210-20260302.iso.sig
+wget https://github.com/Security-Onion-Solutions/securityonion/raw/3/main/sigs/securityonion-3.0.0-20260331.iso.sig
 ```
 
 Download the ISO image:
 ```
-wget https://download.securityonion.net/file/securityonion/securityonion-2.4.210-20260302.iso
+wget https://download.securityonion.net/file/securityonion/securityonion-3.0.0-20260331.iso
 ```
 
 Verify the downloaded ISO image using the signature file:
 ```
-gpg --verify securityonion-2.4.210-20260302.iso.sig securityonion-2.4.210-20260302.iso
+gpg --verify securityonion-3.0.0-20260331.iso.sig securityonion-3.0.0-20260331.iso
 ```
 
 The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
 ```
-gpg: Signature made Mon 02 Mar 2026 11:55:24 AM EST using RSA key ID FE507013
+gpg: Signature made Mon 30 Mar 2026 06:22:14 PM EDT using RSA key ID FE507013
 gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
 gpg: WARNING: This key is not certified with a trusted signature!
 gpg: There is no indication that the signature belongs to the owner.
 ```
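The published checksums can also be checked directly; a minimal sketch using the SHA256 from the new side of the hunk above (sha256sum prints lowercase hex):

```
sha256sum securityonion-3.0.0-20260331.iso
# expect: 7fc47405e335cbe5c2b6c51fe7ac60248f35cbe504907b8b5a33822b23f8f4d5
```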
@@ -1,2 +0,0 @@
-elasticsearch:
-  index_settings:
@@ -0,0 +1,12 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+# Per-minion Telegraf Postgres credentials. so-telegraf-cred on the manager is
+# the single writer; it mutates /opt/so/saltstack/local/pillar/telegraf/creds.sls
+# under flock. Pillar_roots order (local before default) means the populated
+# copy shadows this default on any real grid; this file exists so the pillar
+# key is always defined on fresh installs and when no minions have creds yet.
+
+telegraf:
+  postgres_creds: {}
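The single-writer behavior described in that comment is the classic flock(1) pattern; a hedged sketch of how such a writer might serialize access (only the creds.sls path comes from the comment, the lock-file name is an assumption):

```
CREDS=/opt/so/saltstack/local/pillar/telegraf/creds.sls
(
  flock -x 200          # block until this process holds the exclusive lock
  # read-modify-write the pillar file here while no other writer can run
) 200>"${CREDS}.lock"   # hypothetical lock file alongside the pillar
```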
+21 -3
@@ -17,6 +17,7 @@ base:
     - sensoroni.adv_sensoroni
     - telegraf.soc_telegraf
     - telegraf.adv_telegraf
+    - telegraf.creds
     - versionlock.soc_versionlock
     - versionlock.adv_versionlock
     - soc.license
@@ -38,6 +39,9 @@ base:
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
+{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/postgres/auth.sls') %}
+    - postgres.auth
+{% endif %}
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
     - kibana.secrets
 {% endif %}
@@ -60,6 +64,8 @@ base:
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
+    - postgres.soc_postgres
+    - postgres.adv_postgres
     - elasticsearch.nodes
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
@@ -97,10 +103,12 @@ base:
     - node_data.ips
     - secrets
     - healthcheck.eval
-    - elasticsearch.index_templates
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
+{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/postgres/auth.sls') %}
+    - postgres.auth
+{% endif %}
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
     - kibana.secrets
 {% endif %}
@@ -126,6 +134,8 @@ base:
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
+    - postgres.soc_postgres
+    - postgres.adv_postgres
     - backup.soc_backup
     - backup.adv_backup
     - zeek.soc_zeek
@@ -142,10 +152,12 @@ base:
     - logstash.nodes
     - logstash.soc_logstash
     - logstash.adv_logstash
-    - elasticsearch.index_templates
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
+{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/postgres/auth.sls') %}
+    - postgres.auth
+{% endif %}
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
     - kibana.secrets
 {% endif %}
@@ -160,6 +172,8 @@ base:
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
+    - postgres.soc_postgres
+    - postgres.adv_postgres
     - elasticsearch.nodes
     - elasticsearch.soc_elasticsearch
     - elasticsearch.adv_elasticsearch
@@ -256,10 +270,12 @@ base:
   '*_import':
     - node_data.ips
    - secrets
-    - elasticsearch.index_templates
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %}
     - elasticsearch.auth
 {% endif %}
+{% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/postgres/auth.sls') %}
+    - postgres.auth
+{% endif %}
 {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/kibana/secrets.sls') %}
     - kibana.secrets
 {% endif %}
@@ -285,6 +301,8 @@ base:
     - redis.adv_redis
     - influxdb.soc_influxdb
     - influxdb.adv_influxdb
+    - postgres.soc_postgres
+    - postgres.adv_postgres
     - zeek.soc_zeek
     - zeek.adv_zeek
     - bpf.soc_bpf
|||||||
@@ -29,10 +29,14 @@
|
|||||||
'manager',
|
'manager',
|
||||||
'nginx',
|
'nginx',
|
||||||
'influxdb',
|
'influxdb',
|
||||||
|
'postgres',
|
||||||
|
'postgres.auth',
|
||||||
'soc',
|
'soc',
|
||||||
'kratos',
|
'kratos',
|
||||||
'hydra',
|
'hydra',
|
||||||
'elasticfleet',
|
'elasticfleet',
|
||||||
|
'elasticfleet.manager',
|
||||||
|
'elasticsearch.cluster',
|
||||||
'elastic-fleet-package-registry',
|
'elastic-fleet-package-registry',
|
||||||
'utility'
|
'utility'
|
||||||
] %}
|
] %}
|
||||||
@@ -77,7 +81,7 @@
|
|||||||
),
|
),
|
||||||
'so-heavynode': (
|
'so-heavynode': (
|
||||||
sensor_states +
|
sensor_states +
|
||||||
['elasticagent', 'elasticsearch', 'logstash', 'redis', 'nginx']
|
['elasticagent', 'elasticsearch', 'elasticsearch.cluster', 'logstash', 'redis', 'nginx']
|
||||||
),
|
),
|
||||||
'so-idh': (
|
'so-idh': (
|
||||||
['idh']
|
['idh']
|
||||||
|
|||||||
@@ -32,3 +32,4 @@ so_config_backup:
     - daymonth: '*'
     - month: '*'
     - dayweek: '*'
+
@@ -54,6 +54,20 @@ x509_signing_policies:
     - extendedKeyUsage: serverAuth
     - days_valid: 820
     - copypath: /etc/pki/issued_certs/
+  postgres:
+    - minions: '*'
+    - signing_private_key: /etc/pki/ca.key
+    - signing_cert: /etc/pki/ca.crt
+    - C: US
+    - ST: Utah
+    - L: Salt Lake City
+    - basicConstraints: "critical CA:false"
+    - keyUsage: "critical keyEncipherment"
+    - subjectKeyIdentifier: hash
+    - authorityKeyIdentifier: keyid,issuer:always
+    - extendedKeyUsage: serverAuth
+    - days_valid: 820
+    - copypath: /etc/pki/issued_certs/
   elasticfleet:
     - minions: '*'
     - signing_private_key: /etc/pki/ca.key
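A signing policy defined this way is referenced by name when a minion requests a certificate; a hedged sketch using Salt's x509 execution module (the key path and the `manager` ca_server are assumptions, not taken from this compare):

```
# Hypothetical certificate request against the new "postgres" signing policy.
salt-call x509.create_certificate \
    ca_server=manager \
    signing_policy=postgres \
    public_key=/etc/pki/postgres.key \
    CN="$(hostname)"
```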
@@ -31,6 +31,7 @@ container_list() {
     "so-hydra"
     "so-nginx"
     "so-pcaptools"
+    "so-postgres"
     "so-soc"
     "so-suricata"
     "so-telegraf"
@@ -55,6 +56,7 @@ container_list() {
     "so-logstash"
     "so-nginx"
     "so-pcaptools"
+    "so-postgres"
     "so-redis"
     "so-soc"
     "so-strelka-backend"
@@ -186,8 +188,27 @@ update_docker_containers() {
       if [ -z "$HOSTNAME" ]; then
         HOSTNAME=$(hostname)
       fi
-      docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
-      docker push $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1
+      docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 || {
+        echo "Unable to tag $image" >> "$LOG_FILE" 2>&1
+        exit 1
+      }
+      # Push to the embedded registry via a registry-to-registry copy. Avoids
+      # `docker push`, which on Docker 29.x with the containerd image store
+      # represents freshly-pulled images as an index whose layer content
+      # isn't reachable through the push path. The local `docker tag` above
+      # is preserved so so-image-pull's `:5000` existence check still works.
+      # Pin to the digest already gpg-verified above so we copy exactly the
+      # bytes we approved.
+      local VERIFIED_REF
+      VERIFIED_REF=$(echo "$DOCKERINSPECT" | jq -r ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" | head -n 1)
+      if [ -z "$VERIFIED_REF" ] || [ "$VERIFIED_REF" = "null" ]; then
+        echo "Unable to determine verified digest for $image" >> "$LOG_FILE" 2>&1
+        exit 1
+      fi
+      docker buildx imagetools create --tag $HOSTNAME:5000/$IMAGEREPO/$image "$VERIFIED_REF" >> "$LOG_FILE" 2>&1 || {
+        echo "Unable to copy $image to embedded registry" >> "$LOG_FILE" 2>&1
+        exit 1
+      }
     fi
   else
     echo "There is a problem downloading the $image image. Details: " >> "$LOG_FILE" 2>&1
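After the `imagetools create` copy, the embedded registry can be queried over the standard Docker Registry v2 API to confirm the tag landed; a sketch (image name, scheme, and TLS settings are assumptions):

```
# List tags for one copied image in the embedded registry on port 5000.
curl -ks "https://$(hostname):5000/v2/${IMAGEREPO}/so-nginx/tags/list"
```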
@@ -227,7 +227,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint).*user so_kibana lacks the required permissions \[logs-\1" # Known issue with 3 integrations using kibana_system role vs creating unique api creds with proper permissions.
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint|armis|o365_metrics|microsoft_sentinel|snyk).*user so_kibana lacks the required permissions \[(logs|metrics)-\1" # Known issue with integrations starting transform jobs that are explicitly not allowed to start as a system user. (installed as so_elastic / so_kibana)
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|manifest unknown" # appears in so-dockerregistry log for so-tcpreplay following docker upgrade to 29.2.1-1
 fi
 
@@ -9,7 +9,7 @@
 
 . /usr/sbin/so-common
 
-software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02")
+software_raid=("SOSMN" "SOSMN-DE02" "SOSSNNV" "SOSSNNV-DE02" "SOS10k-DE02" "SOS10KNV" "SOS10KNV-DE02" "SOS10KNV-DE02" "SOS2000-DE02" "SOS-GOFAST-LT-DE02" "SOS-GOFAST-MD-DE02" "SOS-GOFAST-HV-DE02" "HVGUEST")
 hardware_raid=("SOS1000" "SOS1000F" "SOSSN7200" "SOS5000" "SOS4000")
 
 {%- if salt['grains.get']('sosmodel', '') %}
@@ -87,6 +87,11 @@ check_boss_raid() {
 }
 
 check_software_raid() {
+  if [[ ! -f /proc/mdstat ]]; then
+    SWRAID=0
+    return
+  fi
+
   SWRC=$(grep "_" /proc/mdstat)
   if [[ -n $SWRC ]]; then
     # RAID is failed in some way
@@ -107,7 +112,9 @@ if [[ "$is_hwraid" == "true" ]]; then
 fi
 if [[ "$is_softwareraid" == "true" ]]; then
   check_software_raid
+  if [ "$model" != "HVGUEST" ]; then
   check_boss_raid
+  fi
 fi
 
 sum=$(($SWRAID + $BOSSRAID + $HWRAID))
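The `grep "_" /proc/mdstat` test works because a degraded md array shows an underscore inside its member-status brackets; an illustrative excerpt (not captured from a real system):

```
cat /proc/mdstat
# md0 : active raid1 sdb1[1] sda1[0]
#       976630464 blocks super 1.2 [2/1] [U_]   <- "_" marks the failed member
```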
@@ -237,3 +237,11 @@ docker:
       extra_hosts: []
       extra_env: []
       ulimits: []
+    'so-postgres':
+      final_octet: 47
+      port_bindings:
+        - 0.0.0.0:5432:5432
+      custom_bind_mounts: []
+      extra_hosts: []
+      extra_env: []
+      ulimits: []
@@ -0,0 +1,123 @@
+{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
+this file except in compliance with the Elastic License 2.0. #}
+
+{% import_json '/opt/so/state/esfleet_content_package_components.json' as ADDON_CONTENT_PACKAGE_COMPONENTS %}
+{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
+{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
+
+{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
+{% set ADDON_CONTENT_INTEGRATION_DEFAULTS = {} %}
+{% set DEBUG_STUFF = {} %}
+
+{% for pkg in ADDON_CONTENT_PACKAGE_COMPONENTS %}
+  {% if pkg.name in CORE_ESFLEET_PACKAGES %}
+    {# skip core content packages #}
+  {% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
+    {# generate defaults for each content package #}
+    {% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0 %}
+      {% for pattern in pkg.dataStreams %}
+        {# in ES 9.3.2 'input' type integrations no longer create default component templates and instead they wait for user input during 'integration' setup (fleet ui config)
+           title: generic is an artifact of that and is not in use #}
+        {% if pattern.title == "generic" %}
+          {% continue %}
+        {% endif %}
+        {% if "metrics-" in pattern.name %}
+          {% set integration_type = "metrics-" %}
+        {% elif "logs-" in pattern.name %}
+          {% set integration_type = "logs-" %}
+        {% else %}
+          {% set integration_type = "" %}
+        {% endif %}
+        {# on content integrations the component name is user defined at the time it is added to an agent policy #}
+        {% set component_name = pattern.title %}
+        {% set index_pattern = pattern.name %}
+        {# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
+        {% set component_name_x = component_name.replace(".","_x_") %}
+        {# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
+        {% set integration_key = "so-" ~ integration_type ~ pkg.name + '_x_' ~ component_name_x %}
+        {# Default integration settings #}
+        {% set integration_defaults = {
+          "index_sorting": false,
+          "index_template": {
+            "composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
+            "data_stream": {
+              "allow_custom_routing": false,
+              "hidden": false
+            },
+            "ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
+            "index_patterns": [index_pattern],
+            "priority": 501,
+            "template": {
+              "settings": {
+                "index": {
+                  "lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
+                  "number_of_replicas": 0
+                }
+              }
+            }
+          },
+          "policy": {
+            "phases": {
+              "cold": {
+                "actions": {
+                  "allocate": {
+                    "number_of_replicas": ""
+                  },
+                  "set_priority": {"priority": 0}
+                },
+                "min_age": "60d"
+              },
+              "delete": {
+                "actions": {
+                  "delete": {}
+                },
+                "min_age": "365d"
+              },
+              "hot": {
+                "actions": {
+                  "rollover": {
+                    "max_age": "30d",
+                    "max_primary_shard_size": "50gb"
+                  },
+                  "forcemerge": {
+                    "max_num_segments": ""
+                  },
+                  "shrink": {
+                    "max_primary_shard_size": "",
+                    "method": "COUNT",
+                    "number_of_shards": ""
+                  },
+                  "set_priority": {"priority": 100}
+                },
+                "min_age": "0ms"
+              },
+              "warm": {
+                "actions": {
+                  "allocate": {
+                    "number_of_replicas": ""
+                  },
+                  "forcemerge": {
+                    "max_num_segments": ""
+                  },
+                  "shrink": {
+                    "max_primary_shard_size": "",
+                    "method": "COUNT",
+                    "number_of_shards": ""
+                  },
+                  "set_priority": {"priority": 50}
+                },
+                "min_age": "30d"
+              }
+            }
+          }
+        } %}
+
+        {% do ADDON_CONTENT_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
+      {% endfor %}
+    {% else %}
+    {% endif %}
+  {% endif %}
+{% endfor %}
@@ -1,5 +1,6 @@
 elasticfleet:
   enabled: False
+  patch_version: 9.3.3+build202604082258 # Elastic Agent specific patch release.
   enable_manager_output: True
   config:
     server:
@@ -17,65 +17,17 @@ include:
   - logstash.ssl
   - elasticfleet.config
   - elasticfleet.sostatus
+{%- if GLOBALS.role != "so-fleet" %}
+  - elasticfleet.manager
+{%- endif %}

-{% if grains.role not in ['so-fleet'] %}
+{% if GLOBALS.role != "so-fleet" %}
 # Wait for Elasticsearch to be ready - no reason to try running Elastic Fleet server if ES is not ready
 wait_for_elasticsearch_elasticfleet:
   cmd.run:
     - name: so-elasticsearch-wait
 {% endif %}
-
-# If enabled, automatically update Fleet Logstash Outputs
-{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval', 'so-fleet'] %}
-so-elastic-fleet-auto-configure-logstash-outputs:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-outputs-update
-    - retry:
-        attempts: 4
-        interval: 30
-
-{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
-so-elastic-fleet-auto-configure-logstash-outputs-force:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
-    - retry:
-        attempts: 4
-        interval: 30
-    - onchanges:
-      - x509: etc_elasticfleet_logstash_crt
-      - x509: elasticfleet_kafka_crt
-{% endif %}
-
-# If enabled, automatically update Fleet Server URLs & ES Connection
-{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-fleet'] %}
-so-elastic-fleet-auto-configure-server-urls:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-urls-update
-    - retry:
-        attempts: 4
-        interval: 30
-{% endif %}
-
-# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
-{% if grains.role not in ['so-fleet'] %}
-so-elastic-fleet-auto-configure-elasticsearch-urls:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-es-url-update
-    - retry:
-        attempts: 4
-        interval: 30
-
-so-elastic-fleet-auto-configure-artifact-urls:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-artifacts-url-update
-    - retry:
-        attempts: 4
-        interval: 30
-
-{% endif %}
-
 # Sync Elastic Agent artifacts to Fleet Node
 {% if grains.role in ['so-fleet'] %}
 elasticagent_syncartifacts:
   file.recurse:
     - name: /nsm/elastic-fleet/artifacts/beats
@@ -149,57 +101,6 @@ so-elastic-fleet:
       - x509: etc_elasticfleet_crt
 {% endif %}
-
-{% if GLOBALS.role != "so-fleet" %}
-so-elastic-fleet-package-statefile:
-  file.managed:
-    - name: /opt/so/state/elastic_fleet_packages.txt
-    - contents: {{ELASTICFLEETMERGED.packages}}
-
-so-elastic-fleet-package-upgrade:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-package-upgrade
-    - retry:
-        attempts: 3
-        interval: 10
-    - onchanges:
-      - file: /opt/so/state/elastic_fleet_packages.txt
-
-so-elastic-fleet-integrations:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-integration-policy-load
-    - retry:
-        attempts: 3
-        interval: 10
-
-so-elastic-agent-grid-upgrade:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-agent-grid-upgrade
-    - retry:
-        attempts: 12
-        interval: 5
-
-so-elastic-fleet-integration-upgrade:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-integration-upgrade
-    - retry:
-        attempts: 3
-        interval: 10
-
-{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
-so-elastic-fleet-addon-integrations:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-optional-integrations-load
-
-{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
-so-elastic-defend-manage-filters-file-watch:
-  cmd.run:
-    - name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
-    - onchanges:
-      - file: elasticdefendcustom
-      - file: elasticdefenddisabled
-{% endif %}
-{% endif %}
-
 delete_so-elastic-fleet_so-status.disabled:
   file.uncomment:
     - name: /opt/so/conf/so-status/so-status.conf
@@ -9,16 +9,22 @@
   "namespace": "so",
   "description": "Zeek Import logs",
   "policy_id": "so-grid-nodes_general",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/nsm/import/*/zeek/logs/*.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "import",
             "pipeline": "",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -34,7 +40,8 @@
             "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
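The recurring edit in these package policies (policy_id kept alongside a new policy_ids array, the stream key renamed from filestream.generic to filestream.filestream, and the new compression_gzip/use_logs_stream/delete_enabled vars) tracks the newer Kibana Fleet package-policy schema. A hypothetical spot check against the Fleet API, reusing the curl config and Kibana endpoint already used elsewhere in these scripts:

# Hypothetical check: list Fleet package policies and confirm they carry policy_ids
curl -s -K /opt/so/conf/elasticsearch/curl.config -L \
  "http://localhost:5601/api/fleet/package_policies?perPage=100" \
  -H 'kbn-xsrf: true' |
  jq '.items[] | {name: .name, policy_ids: .policy_ids}'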
@@ -15,19 +15,25 @@
     "version": ""
   },
   "name": "kratos-logs",
+  "namespace": "so",
   "description": "Kratos logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/opt/so/log/kratos/kratos.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "kratos",
             "pipeline": "kratos",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -48,10 +54,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -9,16 +9,22 @@
   "namespace": "so",
   "description": "Zeek logs",
   "policy_id": "so-grid-nodes_general",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/nsm/zeek/logs/current/*.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "zeek",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
             "exclude_files": ["({%- endraw -%}{{ ELASTICFLEETMERGED.logging.zeek.excluded | join('|') }}{%- raw -%})(\\..+)?\\.log$"],
@@ -30,10 +36,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -5,7 +5,7 @@
   "package": {
     "name": "endpoint",
     "title": "Elastic Defend",
-    "version": "9.0.2",
+    "version": "9.3.0",
     "requires_root": true
   },
   "enabled": true,
@@ -6,21 +6,23 @@
   "name": "agent-monitor",
   "namespace": "",
   "description": "",
+  "policy_id": "so-grid-nodes_general",
   "policy_ids": [
     "so-grid-nodes_general"
   ],
-  "output_id": null,
   "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/opt/so/log/agents/agent-monitor.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "agentmonitor",
             "pipeline": "elasticagent.monitor",
             "parsers": "",
@@ -34,15 +36,16 @@
             "ignore_older": "72h",
             "clean_inactive": -1,
             "harvester_limit": 0,
-            "fingerprint": true,
+            "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": 64,
-            "file_identity_native": false,
+            "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
     }
-  }
+  },
+  "force": true
 }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "hydra-logs",
+  "namespace": "so",
   "description": "Hydra logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/opt/so/log/hydra/hydra.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "hydra",
             "pipeline": "hydra",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -34,10 +40,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "idh-logs",
+  "namespace": "so",
   "description": "IDH integration",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/nsm/idh/opencanary.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "idh",
             "pipeline": "common",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -31,10 +37,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,26 +4,32 @@
     "version": ""
   },
   "name": "import-evtx-logs",
+  "namespace": "so",
   "description": "Import Windows EVTX logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/nsm/import/*/evtx/*.json"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "import",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
             "exclude_files": [
              "\\.gz$"
             ],
             "include_files": [],
-            "processors": "- dissect:\n    tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n    field: \"log.file.path\"\n    target_prefix: \"\"\n- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n- drop_fields:\n    fields: [\"host\"]\n    ignore_missing: true\n- add_fields:\n    target: data_stream\n    fields:\n      type: logs\n      dataset: system.security\n- add_fields:\n    target: event\n    fields:\n      dataset: system.security\n      module: system\n      imported: true\n- add_fields:\n    target: \"@metadata\"\n    fields:\n      pipeline: logs-system.security-2.6.1\n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n    then: \n      - add_fields:\n          target: data_stream\n          fields:\n            dataset: windows.sysmon_operational\n      - add_fields:\n          target: event\n          fields:\n            dataset: windows.sysmon_operational\n            module: windows\n            imported: true\n      - add_fields:\n          target: \"@metadata\"\n          fields:\n            pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n    equals:\n      winlog.channel: 'Application'\n    then: \n      - add_fields:\n          target: data_stream\n          fields:\n            dataset: system.application\n      - add_fields:\n          target: event\n          fields:\n            dataset: system.application\n      - add_fields:\n          target: \"@metadata\"\n          fields:\n            pipeline: logs-system.application-2.6.1\n- if:\n    equals:\n      winlog.channel: 'System'\n    then: \n      - add_fields:\n          target: data_stream\n          fields:\n            dataset: system.system\n      - add_fields:\n          target: event\n          fields:\n            dataset: system.system\n      - add_fields:\n          target: \"@metadata\"\n          fields:\n            pipeline: logs-system.system-2.6.1\n \n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n    then: \n      - add_fields:\n          target: data_stream\n          fields:\n            dataset: windows.powershell_operational\n      - add_fields:\n          target: event\n          fields:\n            dataset: windows.powershell_operational\n            module: windows\n      - add_fields:\n          target: \"@metadata\"\n          fields:\n            pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n    target: data_stream\n    fields:\n      dataset: import",
+            "processors": "- dissect:\n    tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n    field: \"log.file.path\"\n    target_prefix: \"\"\n- decode_json_fields:\n    fields: [\"message\"]\n    target: \"\"\n- drop_fields:\n    fields: [\"host\"]\n    ignore_missing: true\n- add_fields:\n    target: data_stream\n    fields:\n      type: logs\n      dataset: system.security\n- add_fields:\n    target: event\n    fields:\n      dataset: system.security\n      module: system\n      imported: true\n- add_fields:\n    target: \"@metadata\"\n    fields:\n      pipeline: logs-system.security-2.15.0\n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n    then: \n      - add_fields:\n          target: data_stream\n          fields:\n            dataset: windows.sysmon_operational\n      - add_fields:\n          target: event\n          fields:\n            dataset: windows.sysmon_operational\n            module: windows\n            imported: true\n      - add_fields:\n          target: \"@metadata\"\n          fields:\n            pipeline: logs-windows.sysmon_operational-3.8.0\n- if:\n    equals:\n      winlog.channel: 'Application'\n    then: \n      - add_fields:\n          target: data_stream\n          fields:\n            dataset: system.application\n      - add_fields:\n          target: event\n          fields:\n            dataset: system.application\n      - add_fields:\n          target: \"@metadata\"\n          fields:\n            pipeline: logs-system.application-2.15.0\n- if:\n    equals:\n      winlog.channel: 'System'\n    then: \n      - add_fields:\n          target: data_stream\n          fields:\n            dataset: system.system\n      - add_fields:\n          target: event\n          fields:\n            dataset: system.system\n      - add_fields:\n          target: \"@metadata\"\n          fields:\n            pipeline: logs-system.system-2.15.0\n \n- if:\n    equals:\n      winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n    then: \n      - add_fields:\n          target: data_stream\n          fields:\n            dataset: windows.powershell_operational\n      - add_fields:\n          target: event\n          fields:\n            dataset: windows.powershell_operational\n            module: windows\n      - add_fields:\n          target: \"@metadata\"\n          fields:\n            pipeline: logs-windows.powershell_operational-3.8.0\n- add_fields:\n    target: data_stream\n    fields:\n      dataset: import",
             "tags": [
              "import"
             ],
@@ -33,10 +39,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "import-suricata-logs",
+  "namespace": "so",
   "description": "Import Suricata logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/nsm/import/*/suricata/eve*.json"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "import",
             "pipeline": "suricata.common",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -32,10 +38,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,14 +4,18 @@
     "version": ""
   },
   "name": "rita-logs",
+  "namespace": "so",
   "description": "RITA Logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
@@ -19,6 +23,8 @@
              "/nsm/rita/exploded-dns.csv",
              "/nsm/rita/long-connections.csv"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "rita",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
             "exclude_files": [
@@ -33,10 +39,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "so-ip-mappings",
+  "namespace": "so",
   "description": "IP Description mappings",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/nsm/custom-mappings/ip-descriptions.csv"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "hostnamemappings",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
             "exclude_files": [
@@ -32,10 +38,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "soc-auth-sync-logs",
+  "namespace": "so",
   "description": "Security Onion - Elastic Auth Sync - Logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/opt/so/log/soc/sync.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "soc",
             "pipeline": "common",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -31,10 +37,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,20 +4,26 @@
     "version": ""
   },
   "name": "soc-detections-logs",
+  "namespace": "so",
   "description": "Security Onion Console - Detections Logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/opt/so/log/soc/detections_runtime-status_sigma.log",
              "/opt/so/log/soc/detections_runtime-status_yara.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "soc",
             "pipeline": "common",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -35,10 +41,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "soc-salt-relay-logs",
+  "namespace": "so",
   "description": "Security Onion - Salt Relay - Logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
      "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/opt/so/log/soc/salt-relay.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "soc",
             "pipeline": "common",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -33,10 +39,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "soc-sensoroni-logs",
+  "namespace": "so",
   "description": "Security Onion - Sensoroni - Logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/opt/so/log/sensoroni/sensoroni.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "soc",
             "pipeline": "common",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -31,10 +37,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "soc-server-logs",
+  "namespace": "so",
   "description": "Security Onion Console Logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/opt/so/log/soc/sensoroni-server.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "soc",
             "pipeline": "common",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -33,10 +39,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "strelka-logs",
+  "namespace": "so",
   "description": "Strelka Logs",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/nsm/strelka/log/strelka.log"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "strelka",
             "pipeline": "strelka.file",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -31,10 +37,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
          }
        }
      }
@@ -4,19 +4,25 @@
     "version": ""
   },
   "name": "suricata-logs",
+  "namespace": "so",
   "description": "Suricata integration",
   "policy_id": "so-grid-nodes_general",
-  "namespace": "so",
+  "policy_ids": [
+    "so-grid-nodes_general"
+  ],
+  "vars": {},
   "inputs": {
     "filestream-filestream": {
       "enabled": true,
       "streams": {
-        "filestream.generic": {
+        "filestream.filestream": {
           "enabled": true,
           "vars": {
             "paths": [
              "/nsm/suricata/eve*.json"
             ],
+            "compression_gzip": false,
+            "use_logs_stream": false,
             "data_stream.dataset": "suricata",
             "pipeline": "suricata.common",
             "parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
@@ -31,10 +37,10 @@
             "harvester_limit": 0,
             "fingerprint": false,
             "fingerprint_offset": 0,
-            "fingerprint_length": "64",
             "file_identity_native": true,
             "exclude_lines": [],
-            "include_lines": []
+            "include_lines": [],
+            "delete_enabled": false
           }
         }
       }
@@ -0,0 +1,123 @@
+{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
+this file except in compliance with the Elastic License 2.0. #}
+
+{% import_json '/opt/so/state/esfleet_input_package_components.json' as ADDON_INPUT_PACKAGE_COMPONENTS %}
+{% import_json '/opt/so/state/esfleet_component_templates.json' as INSTALLED_COMPONENT_TEMPLATES %}
+{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
+
+{% set CORE_ESFLEET_PACKAGES = ELASTICFLEETDEFAULTS.get('elasticfleet', {}).get('packages', {}) %}
+{% set ADDON_INPUT_INTEGRATION_DEFAULTS = {} %}
+{% set DEBUG_STUFF = {} %}
+
+{% for pkg in ADDON_INPUT_PACKAGE_COMPONENTS %}
+  {% if pkg.name in CORE_ESFLEET_PACKAGES %}
+    {# skip core input packages #}
+  {% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
+    {# generate defaults for each input package #}
+    {% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0 %}
+      {% for pattern in pkg.dataStreams %}
+        {# in ES 9.3.2 'input' type integrations no longer create default component templates and instead they wait for user input during 'integration' setup (fleet ui config)
+           title: generic is an artifact of that and is not in use #}
+        {% if pattern.title == "generic" %}
+          {% continue %}
+        {% endif %}
+        {% if "metrics-" in pattern.name %}
+          {% set integration_type = "metrics-" %}
+        {% elif "logs-" in pattern.name %}
+          {% set integration_type = "logs-" %}
+        {% else %}
+          {% set integration_type = "" %}
+        {% endif %}
+        {# on input integrations the component name is user defined at the time it is added to an agent policy #}
+        {% set component_name = pattern.title %}
+        {% set index_pattern = pattern.name %}
+        {# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
+        {% set component_name_x = component_name.replace(".","_x_") %}
+        {# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
+        {% set integration_key = "so-" ~ integration_type ~ pkg.name + '_x_' ~ component_name_x %}
+        {# Default integration settings #}
+        {% set integration_defaults = {
+          "index_sorting": false,
+          "index_template": {
+            "composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
+            "data_stream": {
+              "allow_custom_routing": false,
+              "hidden": false
+            },
+            "ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
+            "index_patterns": [index_pattern],
+            "priority": 501,
+            "template": {
+              "settings": {
+                "index": {
+                  "lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
+                  "number_of_replicas": 0
+                }
+              }
+            }
+          },
+          "policy": {
+            "phases": {
+              "cold": {
+                "actions": {
+                  "allocate":{
+                    "number_of_replicas": ""
+                  },
+                  "set_priority": {"priority": 0}
+                },
+                "min_age": "60d"
+              },
+              "delete": {
+                "actions": {
+                  "delete": {}
+                },
+                "min_age": "365d"
+              },
+              "hot": {
+                "actions": {
+                  "rollover": {
+                    "max_age": "30d",
+                    "max_primary_shard_size": "50gb"
+                  },
+                  "forcemerge":{
+                    "max_num_segments": ""
+                  },
+                  "shrink":{
+                    "max_primary_shard_size": "",
+                    "method": "COUNT",
+                    "number_of_shards": ""
+                  },
+                  "set_priority": {"priority": 100}
+                },
+                "min_age": "0ms"
+              },
+              "warm": {
+                "actions": {
+                  "allocate": {
+                    "number_of_replicas": ""
+                  },
+                  "forcemerge": {
+                    "max_num_segments": ""
+                  },
+                  "shrink":{
+                    "max_primary_shard_size": "",
+                    "method": "COUNT",
+                    "number_of_shards": ""
+                  },
+                  "set_priority": {"priority": 50}
+                },
+                "min_age": "30d"
+              }
+            }
+          }
+        } %}
+
+        {% do ADDON_INPUT_INTEGRATION_DEFAULTS.update({integration_key: integration_defaults}) %}
+        {% do DEBUG_STUFF.update({integration_key: "Generating defaults for "+ pkg.name })%}
+      {% endfor %}
+    {% endif %}
+  {% endif %}
+{% endfor %}
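The _x_ substitution used above exists because the SOC pillar merge cannot carry dotted key names; dots are encoded on the way in and restored later in elasticsearch/template.map.jinja. A tiny shell illustration of the round trip (the package name here is hypothetical):

# Hypothetical illustration of the _x_ key encoding
name="1password.item_usages"
key="so-logs-${name//./_x_}"   # -> so-logs-1password_x_item_usages
echo "$key"
echo "${key//_x_/.}"           # reversed later -> so-logs-1password.item_usages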
@@ -59,8 +59,8 @@
     {# skip core integrations #}
   {% elif pkg.name not in CORE_ESFLEET_PACKAGES %}
     {# generate defaults for each integration #}
-    {% if pkg.es_index_patterns is defined and pkg.es_index_patterns is not none %}
+    {% if pkg.dataStreams is defined and pkg.dataStreams is not none and pkg.dataStreams | length > 0 %}
-      {% for pattern in pkg.es_index_patterns %}
+      {% for pattern in pkg.dataStreams %}
        {% if "metrics-" in pattern.name %}
          {% set integration_type = "metrics-" %}
        {% elif "logs-" in pattern.name %}
@@ -75,44 +75,27 @@
        {% if component_name in WEIRD_INTEGRATIONS %}
          {% set component_name = WEIRD_INTEGRATIONS[component_name] %}
        {% endif %}
-       {# create duplicate of component_name, so we can split generics from @custom component templates in the index template below and overwrite the default @package when needed
-          eg. having to replace unifiedlogs.generic@package with filestream.generic@package, but keep the ability to customize unifiedlogs.generic@custom and its ILM policy #}
-       {% set custom_component_name = component_name %}
-       {# duplicate integration_type to assist with sometimes needing to overwrite component templates with 'logs-filestream.generic@package' (there is no metrics-filestream.generic@package) #}
-       {% set generic_integration_type = integration_type %}
        {# component_name_x maintains the functionality of merging local pillar changes with generated 'defaults' via SOC UI #}
        {% set component_name_x = component_name.replace(".","_x_") %}
        {# pillar overrides/merge expects the key names to follow the naming in elasticsearch/defaults.yaml eg. so-logs-1password_x_item_usages . The _x_ is replaced later on in elasticsearch/template.map.jinja #}
        {% set integration_key = "so-" ~ integration_type ~ component_name_x %}
-       {# if its a .generic template make sure that a .generic@package for the integration exists. Else default to logs-filestream.generic@package #}
-       {% if ".generic" in component_name and integration_type ~ component_name ~ "@package" not in INSTALLED_COMPONENT_TEMPLATES %}
-         {# these generic templates by default are directed to index_pattern of 'logs-generic-*', overwrite that here to point to eg gcp_pubsub.generic-* #}
-         {% set index_pattern = integration_type ~ component_name ~ "-*" %}
-         {# includes use of .generic component template, but it doesn't exist in installed component templates. Redirect it to filestream.generic@package #}
-         {% set component_name = "filestream.generic" %}
-         {% set generic_integration_type = "logs-" %}
-       {% endif %}
        {# Default integration settings #}
        {% set integration_defaults = {
          "index_sorting": false,
          "index_template": {
-           "composed_of": [generic_integration_type ~ component_name ~ "@package", integration_type ~ custom_component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
+           "composed_of": [integration_type ~ component_name ~ "@package", integration_type ~ component_name ~ "@custom", "so-fleet_integrations.ip_mappings-1", "so-fleet_globals-1", "so-fleet_agent_id_verification-1"],
            "data_stream": {
              "allow_custom_routing": false,
              "hidden": false
            },
-           "ignore_missing_component_templates": [integration_type ~ custom_component_name ~ "@custom"],
+           "ignore_missing_component_templates": [integration_type ~ component_name ~ "@custom"],
            "index_patterns": [index_pattern],
            "priority": 501,
            "template": {
              "settings": {
                "index": {
-                 "lifecycle": {"name": "so-" ~ integration_type ~ custom_component_name ~ "-logs"},
+                 "lifecycle": {"name": "so-" ~ integration_type ~ component_name ~ "-logs"},
                  "number_of_replicas": 0
                }
              }
@@ -0,0 +1,112 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls in allowed_states %}
+{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
+
+include:
+  - elasticfleet.config
+
+# If enabled, automatically update Fleet Logstash Outputs
+{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration and grains.role not in ['so-import', 'so-eval'] %}
+so-elastic-fleet-auto-configure-logstash-outputs:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-outputs-update
+    - retry:
+        attempts: 4
+        interval: 30
+
+{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
+so-elastic-fleet-auto-configure-logstash-outputs-force:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
+    - retry:
+        attempts: 4
+        interval: 30
+    - onchanges:
+      - x509: etc_elasticfleet_logstash_crt
+      - x509: elasticfleet_kafka_crt
+{% endif %}
+
+# If enabled, automatically update Fleet Server URLs & ES Connection
+so-elastic-fleet-auto-configure-server-urls:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-urls-update
+    - retry:
+        attempts: 4
+        interval: 30
+
+# Automatically update Fleet Server Elasticsearch URLs & Agent Artifact URLs
+so-elastic-fleet-auto-configure-elasticsearch-urls:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-es-url-update
+    - retry:
+        attempts: 4
+        interval: 30
+
+so-elastic-fleet-auto-configure-artifact-urls:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-artifacts-url-update
+    - retry:
+        attempts: 4
+        interval: 30
+
+so-elastic-fleet-package-statefile:
+  file.managed:
+    - name: /opt/so/state/elastic_fleet_packages.txt
+    - contents: {{ELASTICFLEETMERGED.packages}}
+
+so-elastic-fleet-package-upgrade:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-package-upgrade
+    - retry:
+        attempts: 3
+        interval: 10
+    - onchanges:
+      - file: /opt/so/state/elastic_fleet_packages.txt
+
+so-elastic-fleet-integrations:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-integration-policy-load
+    - retry:
+        attempts: 3
+        interval: 10
+
+so-elastic-agent-grid-upgrade:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-agent-grid-upgrade
+    - retry:
+        attempts: 12
+        interval: 5
+
+so-elastic-fleet-integration-upgrade:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-integration-upgrade
+    - retry:
+        attempts: 3
+        interval: 10
+
+{# Optional integrations script doesn't need the retries like so-elastic-fleet-integration-upgrade which loads the default integrations #}
+so-elastic-fleet-addon-integrations:
+  cmd.run:
+    - name: /usr/sbin/so-elastic-fleet-optional-integrations-load
+
+{% if ELASTICFLEETMERGED.config.defend_filters.enable_auto_configuration %}
+so-elastic-defend-manage-filters-file-watch:
+  cmd.run:
+    - name: python3 /sbin/so-elastic-defend-manage-filters.py -c /opt/so/conf/elasticsearch/curl.config -d /opt/so/conf/elastic-fleet/defend-exclusions/disabled-filters.yaml -i /nsm/securityonion-resources/event_filters/ -i /opt/so/conf/elastic-fleet/defend-exclusions/rulesets/custom-filters/ &>> /opt/so/log/elasticfleet/elastic-defend-manage-filters.log
+    - onchanges:
+      - file: elasticdefendcustom
+      - file: elasticdefenddisabled
+{% endif %}
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
+{% endif %}
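This new manager.sls carries the states that previously lived in init.sls, and init.sls now includes it only when GLOBALS.role is not so-fleet. Assuming a manager node with the usual pillar data in place, a hypothetical manual invocation for testing would be:

# Hypothetical manual run of the manager-only state on a manager node; dry run first
salt-call state.apply elasticfleet.manager test=True
salt-call state.apply elasticfleet.manager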
@@ -135,9 +135,33 @@ elastic_fleet_bulk_package_install() {
   fi
 }

-elastic_fleet_installed_packages() {
-  if ! fleet_api "epm/packages/installed?perPage=500"; then
+elastic_fleet_get_package_list_by_type() {
+  if ! output=$(fleet_api "epm/packages"); then
     return 1
+  else
+    is_integration=$(jq '[.items[] | select(.type=="integration") | .name ]' <<< "$output")
+    is_input=$(jq '[.items[] | select(.type=="input") | .name ]' <<< "$output")
+    is_content=$(jq '[.items[] | select(.type=="content") | .name ]' <<< "$output")
+    jq -n --argjson is_integration "${is_integration:-[]}" \
+          --argjson is_input "${is_input:-[]}" \
+          --argjson is_content "${is_content:-[]}" \
+          '{"integration": $is_integration,"input": $is_input, "content": $is_content}'
+  fi
+}
+
+elastic_fleet_installed_packages_components() {
+  package_type=${1,,}
+  if [[ "$package_type" != "integration" && "$package_type" != "input" && "$package_type" != "content" ]]; then
+    echo "Error: Invalid package type ${package_type}. Valid types are 'integration', 'input', or 'content'."
+    return 1
+  fi
+
+  packages_by_type=$(elastic_fleet_get_package_list_by_type)
+  packages=$(jq --arg package_type "$package_type" '.[$package_type]' <<< "$packages_by_type")
+
+  if ! output=$(fleet_api "epm/packages/installed?perPage=500"); then
+    return 1
+  else
+    jq -c --argjson packages "$packages" '[.items[] | select(.name | IN($packages[])) | {name: .name, dataStreams: .dataStreams}]' <<< "$output"
+  fi
 }
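The old elastic_fleet_installed_packages helper is split in two: one call that buckets installed package names by type, and one that returns name plus dataStreams for a requested type. A hypothetical interactive check of the new helpers from a shell on the manager:

# Hypothetical usage: list installed input packages and their data streams
. /usr/sbin/so-elastic-fleet-common
elastic_fleet_get_package_list_by_type | jq .
elastic_fleet_installed_packages_components "input" | jq '.[].name'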
@@ -5,7 +5,13 @@
 # this file except in compliance with the Elastic License 2.0.

 . /usr/sbin/so-common
+. /usr/sbin/so-elastic-fleet-common
 {%- import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS %}
+{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
+{# Optionally override Elasticsearch version for Elastic Agent patch releases #}
+{%- if ELASTICFLEETDEFAULTS.elasticfleet.patch_version is defined %}
+{%- do ELASTICSEARCHDEFAULTS.elasticsearch.update({'version': ELASTICFLEETDEFAULTS.elasticfleet.patch_version}) %}
+{%- endif %}

 # Only run on Managers
 if ! is_manager_node; then
@@ -14,11 +20,8 @@ if ! is_manager_node; then
 fi

 # Get current list of Grid Node Agents that need to be upgraded
-RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" --retry 3 --retry-delay 30 --fail 2>/dev/null)
-
-# Check to make sure that the server responded with good data - else, bail from script
-CHECKSUM=$(jq -r '.page' <<< "$RAW_JSON")
-if [ "$CHECKSUM" -ne 1 ]; then
+if ! RAW_JSON=$(fleet_api "agents?perPage=20&page=1&kuery=NOT%20agent.version%3A%20{{ELASTICSEARCHDEFAULTS.elasticsearch.version | urlencode }}%20AND%20policy_id%3A%20so-grid-nodes_%2A&showInactive=false&getStatusSummary=true" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
   printf "Failed to query for current Grid Agents...\n"
   exit 1
 fi
@@ -31,10 +34,12 @@ if [ "$OUTDATED_LIST" != '[]' ]; then
|
|||||||
printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n"
|
printf "Initiating upgrades for $AGENTNUMBERS Agents to Elastic {{ELASTICSEARCHDEFAULTS.elasticsearch.version}}...\n\n"
|
||||||
|
|
||||||
# Generate updated JSON payload
|
# Generate updated JSON payload
|
||||||
JSON_STRING=$(jq -n --arg ELASTICVERSION {{ELASTICSEARCHDEFAULTS.elasticsearch.version}} --arg UPDATELIST $OUTDATED_LIST '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
|
JSON_STRING=$(jq -n --arg ELASTICVERSION "{{ELASTICSEARCHDEFAULTS.elasticsearch.version}}" --argjson UPDATELIST "$OUTDATED_LIST" '{"version": $ELASTICVERSION,"agents": $UPDATELIST }')
|
||||||
|
|
||||||
# Update Node Agents
|
# Update Node Agents
|
||||||
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "http://localhost:5601/api/fleet/agents/bulk_upgrade" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
|
if ! fleet_api "agents/bulk_upgrade" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
|
||||||
|
printf "Failed to initiate Agent upgrades...\n"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
printf "No Agents need updates... Exiting\n\n"
|
printf "No Agents need updates... Exiting\n\n"
|
||||||
exit 0
|
exit 0
|
||||||
|
|||||||
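The switch from --arg to --argjson matters here: --arg would embed $OUTDATED_LIST as one opaque string, while --argjson splices it in as a real JSON array. A standalone sketch with illustrative agent IDs:

    jq -n --arg ELASTICVERSION "9.3.3" \
          --argjson UPDATELIST '["agent-id-1","agent-id-2"]' \
          '{"version": $ELASTICVERSION, "agents": $UPDATELIST}'
    # {"version":"9.3.3","agents":["agent-id-1","agent-id-2"]}

Quoting "$OUTDATED_LIST" also keeps the array intact even if it ever contains whitespace.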
@@ -18,7 +18,9 @@ INSTALLED_PACKAGE_LIST=/tmp/esfleet_installed_packages.json
 BULK_INSTALL_PACKAGE_LIST=/tmp/esfleet_bulk_install.json
 BULK_INSTALL_PACKAGE_TMP=/tmp/esfleet_bulk_install_tmp.json
 BULK_INSTALL_OUTPUT=/opt/so/state/esfleet_bulk_install_results.json
-PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
+INTEGRATION_PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
+INPUT_PACKAGE_COMPONENTS=/opt/so/state/esfleet_input_package_components.json
+CONTENT_PACKAGE_COMPONENTS=/opt/so/state/esfleet_content_package_components.json
 COMPONENT_TEMPLATES=/opt/so/state/esfleet_component_templates.json
 
 PENDING_UPDATE=false
@@ -179,10 +181,13 @@ if [[ -f $STATE_FILE_SUCCESS ]]; then
 else
   echo "Elastic integrations don't appear to need installation/updating..."
 fi
-# Write out file for generating index/component/ilm templates
-if latest_installed_package_list=$(elastic_fleet_installed_packages); then
-  echo $latest_installed_package_list | jq '[.items[] | {name: .name, es_index_patterns: .dataStreams}]' > $PACKAGE_COMPONENTS
+# Write out file for generating index/component/ilm templates, keeping each package type separate
+for package_type in "INTEGRATION" "INPUT" "CONTENT"; do
+  if latest_installed_package_list=$(elastic_fleet_installed_packages_components "$package_type"); then
+    outfile="${package_type}_PACKAGE_COMPONENTS"
+    echo $latest_installed_package_list > "${!outfile}"
   fi
+done
 if retry 3 1 "so-elasticsearch-query / --fail --output /dev/null"; then
   # Refresh installed component template list
   latest_component_templates_list=$(so-elasticsearch-query _component_template | jq '.component_templates[] | .name' | jq -s '.')
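The loop relies on bash indirect expansion: outfile holds the name of a variable, and ${!outfile} dereferences it to the actual path. A minimal sketch of the pattern:

    INTEGRATION_PACKAGE_COMPONENTS=/opt/so/state/esfleet_package_components.json
    package_type="INTEGRATION"
    outfile="${package_type}_PACKAGE_COMPONENTS"
    echo "${!outfile}"    # prints /opt/so/state/esfleet_package_components.json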
@@ -235,6 +235,16 @@ function update_kafka_outputs() {
 
 {% endif %}
 
+# Compare the current Elastic Fleet certificate against what is on disk
+POLICY_CERT_SHA=$(jq -r '.item.ssl.certificate' <<< $RAW_JSON | openssl x509 -noout -sha256 -fingerprint)
+DISK_CERT_SHA=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt -noout -sha256 -fingerprint)
+
+if [[ "$POLICY_CERT_SHA" != "$DISK_CERT_SHA" ]]; then
+  printf "Certificate on disk doesn't match certificate in policy - forcing update\n"
+  UPDATE_CERTS=true
+  FORCE_UPDATE=true
+fi
+
 # Sort & hash the new list of Logstash Outputs
 NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
 NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
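Both fingerprint commands emit a single line of the form "sha256 Fingerprint=AB:CD:...", so a plain string comparison is sufficient. The on-disk side can be reproduced by hand with the same invocation (output value illustrative):

    openssl x509 -in /etc/pki/elasticfleet-logstash.crt -noout -sha256 -fingerprint
    # sha256 Fingerprint=3A:1F:...:9C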
@@ -0,0 +1,164 @@
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+{% from 'allowed_states.map.jinja' import allowed_states %}
+{% if sls in allowed_states %}
+{% from 'vars/globals.map.jinja' import GLOBALS %}
+{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
+{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS, SO_MANAGED_INDICES %}
+{% if GLOBALS.role != 'so-heavynode' %}
+{% from 'elasticsearch/template.map.jinja' import ALL_ADDON_SETTINGS %}
+{% endif %}
+
+escomponenttemplates:
+  file.recurse:
+    - name: /opt/so/conf/elasticsearch/templates/component
+    - source: salt://elasticsearch/templates/component
+    - user: 930
+    - group: 939
+    - clean: True
+    - onchanges_in:
+      - file: so-elasticsearch-templates-reload
+    - show_changes: False
+
+# Clean up legacy and non-SO managed templates from the elasticsearch/templates/index/ directory
+so_index_template_dir:
+  file.directory:
+    - name: /opt/so/conf/elasticsearch/templates/index
+    - clean: True
+{%- if SO_MANAGED_INDICES %}
+    - require:
+{%- for index in SO_MANAGED_INDICES %}
+      - file: so_index_template_{{index}}
+{%- endfor %}
+{%- endif %}
+
+# Auto-generate index templates for SO managed indices (directly defined in elasticsearch/defaults.yaml)
+# These index templates are for the core SO datasets and are always required
+{% for index, settings in ES_INDEX_SETTINGS.items() %}
+{% if settings.index_template is defined %}
+so_index_template_{{index}}:
+  file.managed:
+    - name: /opt/so/conf/elasticsearch/templates/index/{{ index }}-template.json
+    - source: salt://elasticsearch/base-template.json.jinja
+    - defaults:
+        TEMPLATE_CONFIG: {{ settings.index_template }}
+    - template: jinja
+    - onchanges_in:
+      - file: so-elasticsearch-templates-reload
+{% endif %}
+{% endfor %}
+
+{% if GLOBALS.role != "so-heavynode" %}
+# Auto-generate optional index templates for integration | input | content packages
+# These index templates are not used by default (until user adds package to an agent policy).
+# Pre-configured with standard defaults, and incorporated into SOC configuration for user customization.
+{% for index,settings in ALL_ADDON_SETTINGS.items() %}
+{% if settings.index_template is defined %}
+addon_index_template_{{index}}:
+  file.managed:
+    - name: /opt/so/conf/elasticsearch/templates/addon-index/{{ index }}-template.json
+    - source: salt://elasticsearch/base-template.json.jinja
+    - defaults:
+        TEMPLATE_CONFIG: {{ settings.index_template }}
+    - template: jinja
+    - show_changes: False
+    - onchanges_in:
+      - file: addon-elasticsearch-templates-reload
+{% endif %}
+{% endfor %}
+{% endif %}
+
+{% if GLOBALS.role in GLOBALS.manager_roles %}
+so-es-cluster-settings:
+  cmd.run:
+    - name: /usr/sbin/so-elasticsearch-cluster-settings
+    - cwd: /opt/so
+    - template: jinja
+    - require:
+      - docker_container: so-elasticsearch
+      - file: elasticsearch_sbin_jinja
+      - http: wait_for_so-elasticsearch
+{% endif %}
+
+# heavynodes will only load ILM policies for SO managed indices. (Indices defined in elasticsearch/defaults.yaml)
+so-elasticsearch-ilm-policy-load:
+  cmd.run:
+    - name: /usr/sbin/so-elasticsearch-ilm-policy-load
+    - cwd: /opt/so
+    - require:
+      - docker_container: so-elasticsearch
+      - file: so-elasticsearch-ilm-policy-load-script
+    - onchanges:
+      - file: so-elasticsearch-ilm-policy-load-script
+
+so-elasticsearch-templates-reload:
+  file.absent:
+    - name: /opt/so/state/estemplates.txt
+
+addon-elasticsearch-templates-reload:
+  file.absent:
+    - name: /opt/so/state/addon_estemplates.txt
+
+# so-elasticsearch-templates-load will have its first successful run during the 'so-elastic-fleet-setup' script
+so-elasticsearch-templates:
+  cmd.run:
+{%- if GLOBALS.role == "so-heavynode" %}
+    - name: /usr/sbin/so-elasticsearch-templates-load --heavynode
+{%- else %}
+    - name: /usr/sbin/so-elasticsearch-templates-load
+{%- endif %}
+    - cwd: /opt/so
+    - template: jinja
+    - require:
+      - docker_container: so-elasticsearch
+      - file: elasticsearch_sbin_jinja
+
+so-elasticsearch-pipelines:
+  cmd.run:
+    - name: /usr/sbin/so-elasticsearch-pipelines {{ GLOBALS.hostname }}
+    - require:
+      - docker_container: so-elasticsearch
+      - file: so-elasticsearch-pipelines-script
+
+so-elasticsearch-roles-load:
+  cmd.run:
+    - name: /usr/sbin/so-elasticsearch-roles-load
+    - cwd: /opt/so
+    - template: jinja
+    - require:
+      - docker_container: so-elasticsearch
+      - file: elasticsearch_sbin_jinja
+
+{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
+{% set ap = "absent" %}
+{% endif %}
+{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
+{% if ELASTICSEARCHMERGED.index_clean %}
+{% set ap = "present" %}
+{% else %}
+{% set ap = "absent" %}
+{% endif %}
+{% endif %}
+{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
+so-elasticsearch-indices-delete:
+  cron.{{ap}}:
+    - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
+    - identifier: so-elasticsearch-indices-delete
+    - user: root
+    - minute: '*/5'
+    - hour: '*'
+    - daymonth: '*'
+    - month: '*'
+    - dayweek: '*'
+{% endif %}
+
+{% else %}
+
+{{sls}}_state_not_allowed:
+  test.fail_without_changes:
+    - name: {{sls}}_state_not_allowed
+
+{% endif %}
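The two file.absent reload states act as cache-busters: deleting the statefile forces the next run of so-elasticsearch-templates-load to re-PUT everything. A manual equivalent on a manager (flags taken from the new so-elasticsearch-templates-load script later in this diff):

    sudo rm -f /opt/so/state/estemplates.txt /opt/so/state/addon_estemplates.txt
    sudo /usr/sbin/so-elasticsearch-templates-load --force --verbose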
@@ -66,6 +66,8 @@ so-elasticsearch-ilm-policy-load-script:
     - group: 939
     - mode: 754
     - template: jinja
+    - defaults:
+        GLOBALS: {{ GLOBALS }}
     - show_changes: False
 
 so-elasticsearch-pipelines-script:
@@ -91,6 +93,13 @@ estemplatedir:
     - group: 939
     - makedirs: True
 
+esaddontemplatedir:
+  file.directory:
+    - name: /opt/so/conf/elasticsearch/templates/addon-index
+    - user: 930
+    - group: 939
+    - makedirs: True
+
 esrolesdir:
   file.directory:
     - name: /opt/so/conf/elasticsearch/roles
@@ -1,6 +1,6 @@
 elasticsearch:
   enabled: false
-  version: 9.0.8
+  version: 9.3.3
   index_clean: true
   vm:
     max_map_count: 1048576
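After the bump is deployed, the running cluster can be checked against the new default via the standard Elasticsearch root endpoint:

    so-elasticsearch-query / | jq -r '.version.number'   # expect 9.3.3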
+16 -125
@@ -10,8 +10,6 @@
 {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
 {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %}
 {% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
-{% set TEMPLATES = salt['pillar.get']('elasticsearch:templates', {}) %}
-{% from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
 
 include:
   - ca
@@ -19,6 +17,9 @@ include:
   - elasticsearch.ssl
   - elasticsearch.config
  - elasticsearch.sostatus
+{%- if GLOBALS.role != "so-searchnode" %}
+  - elasticsearch.cluster
+{%- endif%}
 
 so-elasticsearch:
   docker_container.running:
@@ -101,134 +102,24 @@ so-elasticsearch:
       - cmd: auth_users_roles_inode
       - cmd: auth_users_inode
 
+wait_for_so-elasticsearch:
+  http.wait_for_successful_query:
+    - name: "https://localhost:9200/"
+    - username: 'so_elastic'
+    - password: '{{ ELASTICSEARCHMERGED.auth.users.so_elastic_user.pass }}'
+    - ssl: True
+    - verify_ssl: False
+    - status: 200
+    - wait_for: 300
+    - request_interval: 15
+    - require:
+      - docker_container: so-elasticsearch
+
 delete_so-elasticsearch_so-status.disabled:
   file.uncomment:
     - name: /opt/so/conf/so-status/so-status.conf
     - regex: ^so-elasticsearch$
 
-{% if GLOBALS.role != "so-searchnode" %}
-escomponenttemplates:
-  file.recurse:
-    - name: /opt/so/conf/elasticsearch/templates/component
-    - source: salt://elasticsearch/templates/component
-    - user: 930
-    - group: 939
-    - clean: True
-    - onchanges_in:
-      - file: so-elasticsearch-templates-reload
-    - show_changes: False
-
-# Auto-generate templates from defaults file
-{% for index, settings in ES_INDEX_SETTINGS.items() %}
-{% if settings.index_template is defined %}
-es_index_template_{{index}}:
-  file.managed:
-    - name: /opt/so/conf/elasticsearch/templates/index/{{ index }}-template.json
-    - source: salt://elasticsearch/base-template.json.jinja
-    - defaults:
-        TEMPLATE_CONFIG: {{ settings.index_template }}
-    - template: jinja
-    - show_changes: False
-    - onchanges_in:
-      - file: so-elasticsearch-templates-reload
-{% endif %}
-{% endfor %}
-
-{% if TEMPLATES %}
-# Sync custom templates to /opt/so/conf/elasticsearch/templates
-{% for TEMPLATE in TEMPLATES %}
-es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}:
-  file.managed:
-    - source: salt://elasticsearch/templates/index/{{TEMPLATE}}
-{% if 'jinja' in TEMPLATE.split('.')[-1] %}
-    - name: /opt/so/conf/elasticsearch/templates/index/{{TEMPLATE.split('/')[1] | replace(".jinja", "")}}
-    - template: jinja
-{% else %}
-    - name: /opt/so/conf/elasticsearch/templates/index/{{TEMPLATE.split('/')[1]}}
-{% endif %}
-    - user: 930
-    - group: 939
-    - show_changes: False
-    - onchanges_in:
-      - file: so-elasticsearch-templates-reload
-{% endfor %}
-{% endif %}
-
-{% if GLOBALS.role in GLOBALS.manager_roles %}
-so-es-cluster-settings:
-  cmd.run:
-    - name: /usr/sbin/so-elasticsearch-cluster-settings
-    - cwd: /opt/so
-    - template: jinja
-    - require:
-      - docker_container: so-elasticsearch
-      - file: elasticsearch_sbin_jinja
-{% endif %}
-
-so-elasticsearch-ilm-policy-load:
-  cmd.run:
-    - name: /usr/sbin/so-elasticsearch-ilm-policy-load
-    - cwd: /opt/so
-    - require:
-      - docker_container: so-elasticsearch
-      - file: so-elasticsearch-ilm-policy-load-script
-    - onchanges:
-      - file: so-elasticsearch-ilm-policy-load-script
-
-so-elasticsearch-templates-reload:
-  file.absent:
-    - name: /opt/so/state/estemplates.txt
-
-so-elasticsearch-templates:
-  cmd.run:
-    - name: /usr/sbin/so-elasticsearch-templates-load
-    - cwd: /opt/so
-    - template: jinja
-    - require:
-      - docker_container: so-elasticsearch
-      - file: elasticsearch_sbin_jinja
-
-so-elasticsearch-pipelines:
-  cmd.run:
-    - name: /usr/sbin/so-elasticsearch-pipelines {{ GLOBALS.hostname }}
-    - require:
-      - docker_container: so-elasticsearch
-      - file: so-elasticsearch-pipelines-script
-
-so-elasticsearch-roles-load:
-  cmd.run:
-    - name: /usr/sbin/so-elasticsearch-roles-load
-    - cwd: /opt/so
-    - template: jinja
-    - require:
-      - docker_container: so-elasticsearch
-      - file: elasticsearch_sbin_jinja
-
-{% if grains.role in ['so-managersearch', 'so-manager', 'so-managerhype'] %}
-{% set ap = "absent" %}
-{% endif %}
-{% if grains.role in ['so-eval', 'so-standalone', 'so-heavynode'] %}
-{% if ELASTICSEARCHMERGED.index_clean %}
-{% set ap = "present" %}
-{% else %}
-{% set ap = "absent" %}
-{% endif %}
-{% endif %}
-{% if grains.role in ['so-eval', 'so-standalone', 'so-managersearch', 'so-heavynode', 'so-manager'] %}
-so-elasticsearch-indices-delete:
-  cron.{{ap}}:
-    - name: /usr/sbin/so-elasticsearch-indices-delete > /opt/so/log/elasticsearch/cron-elasticsearch-indices-delete.log 2>&1
-    - identifier: so-elasticsearch-indices-delete
-    - user: root
-    - minute: '*/5'
-    - hour: '*'
-    - daymonth: '*'
-    - month: '*'
-    - dayweek: '*'
-{% endif %}
-
-{% endif %}
-
 {% else %}
 
 {{sls}}_state_not_allowed:
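The new wait_for_so-elasticsearch state polls the HTTPS endpoint every 15 seconds for up to 300 seconds before dependent states (such as so-es-cluster-settings above) are allowed to run. A rough shell equivalent of that poll, with the password left as a placeholder:

    until curl -sk --fail -u "so_elastic:<password>" -o /dev/null https://localhost:9200/; do
      sleep 15
    done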
+74 -13
@@ -10,24 +10,28 @@
   "processors": [
     {
       "set": {
+        "tag": "set_ecs_version_f5923549",
         "field": "ecs.version",
         "value": "8.17.0"
       }
     },
     {
       "set": {
+        "tag": "set_observer_vendor_ad9d35cc",
         "field": "observer.vendor",
         "value": "netgate"
       }
     },
     {
       "set": {
+        "tag": "set_observer_type_5dddf3ba",
         "field": "observer.type",
         "value": "firewall"
       }
     },
     {
       "rename": {
+        "tag": "rename_message_to_event_original_56a77271",
         "field": "message",
         "target_field": "event.original",
         "ignore_missing": true,
@@ -36,12 +40,14 @@
     },
     {
       "set": {
+        "tag": "set_event_kind_de80643c",
         "field": "event.kind",
         "value": "event"
       }
     },
     {
       "set": {
+        "tag": "set_event_timezone_4ca44cac",
         "field": "event.timezone",
         "value": "{{{_tmp.tz_offset}}}",
         "if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
@@ -49,6 +55,7 @@
     },
     {
       "grok": {
+        "tag": "grok_event_original_27d9c8c7",
         "description": "Parse syslog header",
         "field": "event.original",
         "patterns": [
@@ -72,6 +79,7 @@
     },
     {
       "date": {
+        "tag": "date__tmp_timestamp8601_to_timestamp_6ac9d3ce",
         "if": "ctx._tmp.timestamp8601 != null",
         "field": "_tmp.timestamp8601",
         "target_field": "@timestamp",
@@ -82,6 +90,7 @@
     },
     {
       "date": {
+        "tag": "date__tmp_timestamp_to_timestamp_f21e536e",
         "if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
         "field": "_tmp.timestamp",
         "target_field": "@timestamp",
@@ -95,6 +104,7 @@
     },
     {
       "grok": {
+        "tag": "grok_process_name_cef3d489",
         "description": "Set Event Provider",
         "field": "process.name",
         "patterns": [
@@ -107,71 +117,83 @@
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-firewall",
+        "tag": "pipeline_e16851a7",
+        "name": "logs-pfsense.log-1.25.2-firewall",
         "if": "ctx.event.provider == 'filterlog'"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-openvpn",
+        "tag": "pipeline_828590b5",
+        "name": "logs-pfsense.log-1.25.2-openvpn",
         "if": "ctx.event.provider == 'openvpn'"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-ipsec",
+        "tag": "pipeline_9d37039c",
+        "name": "logs-pfsense.log-1.25.2-ipsec",
         "if": "ctx.event.provider == 'charon'"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-dhcp",
-        "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
+        "tag": "pipeline_ad56bbca",
+        "name": "logs-pfsense.log-1.25.2-dhcp",
+        "if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\", \"dnsmasq-dhcp\"].contains(ctx.event.provider)"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-unbound",
+        "tag": "pipeline_dd85553d",
+        "name": "logs-pfsense.log-1.25.2-unbound",
         "if": "ctx.event.provider == 'unbound'"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-haproxy",
+        "tag": "pipeline_720ed255",
+        "name": "logs-pfsense.log-1.25.2-haproxy",
         "if": "ctx.event.provider == 'haproxy'"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-php-fpm",
+        "tag": "pipeline_456beba5",
+        "name": "logs-pfsense.log-1.25.2-php-fpm",
         "if": "ctx.event.provider == 'php-fpm'"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-squid",
+        "tag": "pipeline_a0d89375",
+        "name": "logs-pfsense.log-1.25.2-squid",
         "if": "ctx.event.provider == 'squid'"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-snort",
+        "tag": "pipeline_c2f1ed55",
+        "name": "logs-pfsense.log-1.25.2-snort",
         "if": "ctx.event.provider == 'snort'"
       }
     },
     {
       "pipeline": {
-        "name": "logs-pfsense.log-1.23.1-suricata",
+        "tag": "pipeline_33db1c9e",
+        "name": "logs-pfsense.log-1.25.2-suricata",
         "if": "ctx.event.provider == 'suricata'"
       }
     },
     {
       "drop": {
-        "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"snort\", \"suricata\"].contains(ctx.event?.provider)"
+        "tag": "drop_9d7c46f8",
+        "if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dnsmasq-dhcp\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"snort\", \"suricata\"].contains(ctx.event?.provider)"
       }
     },
     {
       "append": {
+        "tag": "append_event_category_4780a983",
         "field": "event.category",
         "value": "network",
         "if": "ctx.network != null"
@@ -179,6 +201,7 @@
     },
     {
       "convert": {
+        "tag": "convert_source_address_to_source_ip_f5632a20",
         "field": "source.address",
         "target_field": "source.ip",
         "type": "ip",
@@ -188,6 +211,7 @@
     },
     {
       "convert": {
+        "tag": "convert_destination_address_to_destination_ip_f1388f0c",
         "field": "destination.address",
         "target_field": "destination.ip",
         "type": "ip",
@@ -197,6 +221,7 @@
     },
     {
       "set": {
+        "tag": "set_network_type_1f1d940a",
         "field": "network.type",
         "value": "ipv6",
         "if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
@@ -204,6 +229,7 @@
     },
     {
       "set": {
+        "tag": "set_network_type_69deca38",
         "field": "network.type",
         "value": "ipv4",
         "if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
@@ -211,6 +237,7 @@
     },
     {
       "geoip": {
+        "tag": "geoip_source_ip_to_source_geo_da2e41b2",
         "field": "source.ip",
         "target_field": "source.geo",
         "ignore_missing": true
@@ -218,6 +245,7 @@
     },
     {
       "geoip": {
+        "tag": "geoip_destination_ip_to_destination_geo_ab5e2968",
         "field": "destination.ip",
         "target_field": "destination.geo",
         "ignore_missing": true
@@ -225,6 +253,7 @@
     },
     {
       "geoip": {
+        "tag": "geoip_source_ip_to_source_as_28d69883",
         "ignore_missing": true,
         "database_file": "GeoLite2-ASN.mmdb",
         "field": "source.ip",
@@ -237,6 +266,7 @@
     },
     {
       "geoip": {
+        "tag": "geoip_destination_ip_to_destination_as_8a007787",
         "database_file": "GeoLite2-ASN.mmdb",
         "field": "destination.ip",
         "target_field": "destination.as",
@@ -249,6 +279,7 @@
     },
     {
       "rename": {
+        "tag": "rename_source_as_asn_to_source_as_number_a917047d",
         "field": "source.as.asn",
         "target_field": "source.as.number",
         "ignore_missing": true
@@ -256,6 +287,7 @@
     },
     {
       "rename": {
+        "tag": "rename_source_as_organization_name_to_source_as_organization_name_f1362d0b",
         "field": "source.as.organization_name",
         "target_field": "source.as.organization.name",
         "ignore_missing": true
@@ -263,6 +295,7 @@
     },
     {
       "rename": {
+        "tag": "rename_destination_as_asn_to_destination_as_number_3b459fcd",
         "field": "destination.as.asn",
         "target_field": "destination.as.number",
         "ignore_missing": true
@@ -270,6 +303,7 @@
     },
     {
       "rename": {
+        "tag": "rename_destination_as_organization_name_to_destination_as_organization_name_814bd459",
         "field": "destination.as.organization_name",
         "target_field": "destination.as.organization.name",
         "ignore_missing": true
@@ -277,12 +311,14 @@
     },
     {
       "community_id": {
+        "tag": "community_id_d2308e7a",
         "target_field": "network.community_id",
         "ignore_failure": true
       }
     },
     {
       "grok": {
+        "tag": "grok_observer_ingress_interface_name_968018d3",
         "field": "observer.ingress.interface.name",
         "patterns": [
           "%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
@@ -293,6 +329,7 @@
     },
     {
       "set": {
+        "tag": "set_network_vlan_id_efd4d96a",
         "field": "network.vlan.id",
         "copy_from": "observer.ingress.vlan.id",
         "ignore_empty_value": true
@@ -300,6 +337,7 @@
     },
     {
       "append": {
+        "tag": "append_related_ip_c1a6356b",
         "field": "related.ip",
         "value": "{{{destination.ip}}}",
         "allow_duplicates": false,
@@ -308,6 +346,7 @@
     },
     {
       "append": {
+        "tag": "append_related_ip_8121c591",
         "field": "related.ip",
         "value": "{{{source.ip}}}",
         "allow_duplicates": false,
@@ -316,6 +355,7 @@
     },
     {
       "append": {
+        "tag": "append_related_ip_53b62ed8",
         "field": "related.ip",
         "value": "{{{source.nat.ip}}}",
         "allow_duplicates": false,
@@ -324,6 +364,7 @@
     },
     {
       "append": {
+        "tag": "append_related_hosts_6f162628",
         "field": "related.hosts",
         "value": "{{{destination.domain}}}",
         "if": "ctx.destination?.domain != null"
@@ -331,6 +372,7 @@
     },
     {
       "append": {
+        "tag": "append_related_user_c036eec2",
         "field": "related.user",
         "value": "{{{user.name}}}",
         "if": "ctx.user?.name != null"
@@ -338,6 +380,7 @@
     },
     {
       "set": {
+        "tag": "set_network_direction_cb1e3125",
         "field": "network.direction",
         "value": "{{{network.direction}}}bound",
         "if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
@@ -345,6 +388,7 @@
     },
     {
       "remove": {
+        "tag": "remove_a82e20f2",
         "field": [
           "_tmp"
         ],
@@ -353,11 +397,21 @@
     },
     {
       "script": {
+        "tag": "script_a7f2c062",
         "lang": "painless",
         "description": "This script processor iterates over the whole document to remove fields with null values.",
         "source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
       }
     },
+    {
+      "append": {
+        "tag": "append_preserve_original_event_on_error",
+        "field": "tags",
+        "value": "preserve_original_event",
+        "allow_duplicates": false,
+        "if": "ctx.error?.message != null"
+      }
+    },
     {
       "pipeline": {
         "name": "global@custom",
@@ -405,7 +459,14 @@
     {
       "append": {
         "field": "error.message",
-        "value": "{{{ _ingest.on_failure_message }}}"
+        "value": "Processor '{{{ _ingest.on_failure_processor_type }}}' {{#_ingest.on_failure_processor_tag}}with tag '{{{ _ingest.on_failure_processor_tag }}}' {{/_ingest.on_failure_processor_tag}}in pipeline '{{{ _ingest.pipeline }}}' failed with message '{{{ _ingest.on_failure_message }}}'"
+      }
+    },
+    {
+      "append": {
+        "field": "tags",
+        "value": "preserve_original_event",
+        "allow_duplicates": false
       }
     }
   ]
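With every processor now tagged, a failure surfaces in error.message as "Processor 'x' with tag 'y' in pipeline 'z' failed...", which makes triage much faster than the bare on_failure_message. The behavior can be exercised with the ingest simulate API; the parent pipeline name here is assumed from the sub-pipeline naming, and the sample document is illustrative:

    so-elasticsearch-query "_ingest/pipeline/logs-pfsense.log-1.25.2/_simulate" -XPOST \
      -H 'Content-Type: application/json' \
      -d '{"docs":[{"_source":{"message":"not a pfsense syslog line"}}]}' | jq '.docs[0]'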
@@ -22,6 +22,12 @@
         "ignore_failure": true
       }
     },
+    {
+      "lowercase": {
+        "field": "network.transport",
+        "ignore_failure": true
+      }
+    },
     {
       "rename": {
         "field": "message2.in_iface",
|||||||
@@ -45,3 +45,7 @@ appender.rolling_json.strategy.action.condition.nested_condition.age = 1D
|
|||||||
rootLogger.level = info
|
rootLogger.level = info
|
||||||
rootLogger.appenderRef.rolling.ref = rolling
|
rootLogger.appenderRef.rolling.ref = rolling
|
||||||
rootLogger.appenderRef.rolling_json.ref = rolling_json
|
rootLogger.appenderRef.rolling_json.ref = rolling_json
|
||||||
|
|
||||||
|
# Suppress NotEntitledException WARNs (ES 9.3.3 bug)
|
||||||
|
logger.entitlement_security.name = org.elasticsearch.entitlement.runtime.policy.PolicyManager.x-pack-security.org.elasticsearch.security.org.elasticsearch.xpack.security
|
||||||
|
logger.entitlement_security.level = error
|
||||||
@@ -14,15 +14,42 @@
 
 {% set ES_INDEX_SETTINGS_ORIG = ELASTICSEARCHDEFAULTS.elasticsearch.index_settings %}
+
+{% set ALL_ADDON_INTEGRATION_DEFAULTS = {} %}
+{% set ALL_ADDON_SETTINGS_ORIG = {} %}
+{% set ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES = {} %}
+{% set ALL_ADDON_SETTINGS = {} %}
 {# start generation of integration default index_settings #}
-{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') and salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
-{% set check_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
-{% if check_package_components.size > 1 %}
+{% if salt['file.file_exists']('/opt/so/state/esfleet_component_templates.json') %}
+{# import integration type defaults #}
+{% if salt['file.file_exists']('/opt/so/state/esfleet_package_components.json') %}
+{% set check_integration_package_components = salt['file.stats']('/opt/so/state/esfleet_package_components.json') %}
+{% if check_integration_package_components.size > 1 %}
 {% from 'elasticfleet/integration-defaults.map.jinja' import ADDON_INTEGRATION_DEFAULTS %}
-{% for index, settings in ADDON_INTEGRATION_DEFAULTS.items() %}
-{% do ES_INDEX_SETTINGS_ORIG.update({index: settings}) %}
+{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_INTEGRATION_DEFAULTS) %}
+{% endif %}
+{% endif %}
+
+{# import input type defaults #}
+{% if salt['file.file_exists']('/opt/so/state/esfleet_input_package_components.json') %}
+{% set check_input_package_components = salt['file.stats']('/opt/so/state/esfleet_input_package_components.json') %}
+{% if check_input_package_components.size > 1 %}
+{% from 'elasticfleet/input-defaults.map.jinja' import ADDON_INPUT_INTEGRATION_DEFAULTS %}
+{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_INPUT_INTEGRATION_DEFAULTS) %}
+{% endif %}
+{% endif %}
+
+{# import content type defaults #}
+{% if salt['file.file_exists']('/opt/so/state/esfleet_content_package_components.json') %}
+{% set check_content_package_components = salt['file.stats']('/opt/so/state/esfleet_content_package_components.json') %}
+{% if check_content_package_components.size > 1 %}
+{% from 'elasticfleet/content-defaults.map.jinja' import ADDON_CONTENT_INTEGRATION_DEFAULTS %}
+{% do ALL_ADDON_INTEGRATION_DEFAULTS.update(ADDON_CONTENT_INTEGRATION_DEFAULTS) %}
+{% endif %}
+{% endif %}
+
+{% for index, settings in ALL_ADDON_INTEGRATION_DEFAULTS.items() %}
+{% do ALL_ADDON_SETTINGS_ORIG.update({index: settings}) %}
 {% endfor %}
-{% endif%}
 {% endif %}
 {# end generation of integration default index_settings #}
 
@@ -31,25 +58,33 @@
 {% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update({index: salt['defaults.merge'](ELASTICSEARCHDEFAULTS.elasticsearch.index_settings[index], PILLAR_GLOBAL_OVERRIDES, in_place=False)}) %}
 {% endfor %}
+
+{% if ALL_ADDON_SETTINGS_ORIG.keys() | length > 0 %}
+{% for index in ALL_ADDON_SETTINGS_ORIG.keys() %}
+{% do ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES.update({index: salt['defaults.merge'](ALL_ADDON_SETTINGS_ORIG[index], PILLAR_GLOBAL_OVERRIDES, in_place=False)}) %}
+{% endfor %}
+{% endif %}
+
 {% set ES_INDEX_SETTINGS = {} %}
-{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.update(salt['defaults.merge'](ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
-{% for index, settings in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES.items() %}
+{% macro create_final_index_template(DEFINED_SETTINGS, GLOBAL_OVERRIDES, FINAL_INDEX_SETTINGS) %}
+{% do GLOBAL_OVERRIDES.update(salt['defaults.merge'](GLOBAL_OVERRIDES, ES_INDEX_PILLAR, in_place=False)) %}
+{% for index, settings in GLOBAL_OVERRIDES.items() %}
+
 {# prevent this action from being performed on custom defined indices. #}
 {# the custom defined index is not present in either of the dictionaries and fails to render. #}
-{% if index in ES_INDEX_SETTINGS_ORIG and index in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES %}
+{% if index in DEFINED_SETTINGS and index in GLOBAL_OVERRIDES %}
+
 {# don't merge policy from the global_overrides if policy isn't defined in the original index settings #}
 {# this will prevent so-elasticsearch-ilm-policy-load from trying to load policy on non ILM managed indices #}
-{% if not ES_INDEX_SETTINGS_ORIG[index].policy is defined and ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
-{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].pop('policy') %}
+{% if not DEFINED_SETTINGS[index].policy is defined and GLOBAL_OVERRIDES[index].policy is defined %}
+{% do GLOBAL_OVERRIDES[index].pop('policy') %}
 {% endif %}
+
 {# this prevents an index from inheriting a policy phase from global overrides if it wasn't defined in the defaults. #}
-{% if ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy is defined %}
-{% for phase in ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.copy() %}
-{% if ES_INDEX_SETTINGS_ORIG[index].policy.phases[phase] is not defined %}
-{% do ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %}
+{% if GLOBAL_OVERRIDES[index].policy is defined %}
+{% for phase in GLOBAL_OVERRIDES[index].policy.phases.copy() %}
+{% if DEFINED_SETTINGS[index].policy.phases[phase] is not defined %}
+{% do GLOBAL_OVERRIDES[index].policy.phases.pop(phase) %}
 {% endif %}
 {% endfor %}
 {% endif %}
@@ -111,5 +146,14 @@
 {% endfor %}
 {% endif %}
 
-{% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %}
+{% do FINAL_INDEX_SETTINGS.update({index | replace("_x_", "."): GLOBAL_OVERRIDES[index]}) %}
+{% endfor %}
+{% endmacro %}
+
+{{ create_final_index_template(ES_INDEX_SETTINGS_ORIG, ES_INDEX_SETTINGS_GLOBAL_OVERRIDES, ES_INDEX_SETTINGS) }}
+{{ create_final_index_template(ALL_ADDON_SETTINGS_ORIG, ALL_ADDON_SETTINGS_GLOBAL_OVERRIDES, ALL_ADDON_SETTINGS) }}
+
+{% set SO_MANAGED_INDICES = [] %}
+{% for index, settings in ES_INDEX_SETTINGS.items() %}
+{% do SO_MANAGED_INDICES.append(index) %}
 {% endfor %}
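The size > 1 guards skip state files that exist but are effectively empty (e.g. containing only a newline). What the map will pick up on the next render can be previewed from the shell, using the paths from this diff:

    for f in /opt/so/state/esfleet_package_components.json \
             /opt/so/state/esfleet_input_package_components.json \
             /opt/so/state/esfleet_content_package_components.json; do
      [ -s "$f" ] && echo "$f: $(jq 'length' "$f") packages"
    done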
@@ -6,8 +6,19 @@
 # Elastic License 2.0.
 
 . /usr/sbin/so-common
-if [ "$1" == "" ]; then
-  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://localhost:9200/_component_template | jq '.component_templates[] |.name'| sort
+
+if [[ -z "$1" ]]; then
+  if output=$(so-elasticsearch-query "_component_template" --retry 3 --retry-delay 1 --fail); then
+    jq '[.component_templates[] | .name] | sort' <<< "$output"
+  else
+    echo "Failed to retrieve component templates from Elasticsearch."
+    exit 1
+  fi
 else
-  curl -K /opt/so/conf/elasticsearch/curl.config -s -k -L https://localhost:9200/_component_template/$1 | jq
+  if output=$(so-elasticsearch-query "_component_template/$1" --retry 3 --retry-delay 1 --fail); then
+    jq <<< "$output"
+  else
+    echo "Failed to retrieve component template '$1' from Elasticsearch."
+    exit 1
+  fi
 fi
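Callers see the same interface as before; the retries and --fail guard just make failures explicit instead of silently emitting empty JSON. Usage, with an illustrative template name:

    so-elasticsearch-component-templates-list                      # all template names, sorted JSON array
    so-elasticsearch-component-templates-list so-common-settings   # one template, pretty-printed (name illustrative)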
@@ -0,0 +1,276 @@
|
|||||||
|
#!/bin/bash
|
||||||
|
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||||
|
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||||
|
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||||
|
# Elastic License 2.0.
|
||||||
|
|
||||||
|
. /usr/sbin/so-common
|
||||||
|
|
||||||
|
SO_STATEFILE_SUCCESS=/opt/so/state/estemplates.txt
|
||||||
|
ADDON_STATEFILE_SUCCESS=/opt/so/state/addon_estemplates.txt
|
||||||
|
ELASTICSEARCH_TEMPLATES_DIR="/opt/so/conf/elasticsearch/templates"
|
||||||
|
SO_TEMPLATES_DIR="${ELASTICSEARCH_TEMPLATES_DIR}/index"
|
||||||
|
ADDON_TEMPLATES_DIR="${ELASTICSEARCH_TEMPLATES_DIR}/addon-index"
|
||||||
|
SO_LOAD_FAILURES=0
|
||||||
|
ADDON_LOAD_FAILURES=0
|
||||||
|
SO_LOAD_FAILURES_NAMES=()
|
||||||
|
ADDON_LOAD_FAILURES_NAMES=()
|
||||||
|
IS_HEAVYNODE="false"
|
||||||
|
FORCE="false"
|
||||||
|
VERBOSE="false"
|
||||||
|
SHOULD_EXIT_ON_FAILURE="true"
|
||||||
|
|
||||||
|
# If soup is running, ignore errors
|
||||||
|
pgrep soup >/dev/null && SHOULD_EXIT_ON_FAILURE="false"
|
||||||
|
|
||||||
|
while [[ $# -gt 0 ]]; do
|
||||||
|
case "$1" in
|
||||||
|
--heavynode)
|
||||||
|
IS_HEAVYNODE="true"
|
||||||
|
;;
|
||||||
|
--force)
|
||||||
|
FORCE="true"
|
||||||
|
;;
|
||||||
|
--verbose)
|
||||||
|
VERBOSE="true"
|
||||||
|
;;
|
||||||
|
*)
|
||||||
|
echo "Usage: $0 [options]"
|
||||||
|
echo "Options:"
|
||||||
|
echo " --heavynode Only loads index templates specific to heavynodes"
|
||||||
|
echo " --force Force reload all templates regardless of statefiles (default: false)"
|
||||||
|
echo " --verbose Enable verbose output"
|
||||||
|
exit 1
|
||||||
|
;;
|
||||||
|
esac
|
||||||
|
shift
|
||||||
|
done
|
||||||
|
|
||||||
|
load_template() {
|
||||||
|
local uri="$1"
|
||||||
|
local file="$2"
|
||||||
|
|
||||||
|
echo "Loading template file $file"
|
||||||
|
if ! output=$(retry 3 3 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"); then
|
||||||
|
echo "$output"
|
||||||
|
|
||||||
|
return 1
|
||||||
|
|
||||||
|
elif [[ "$VERBOSE" == "true" ]]; then
|
||||||
|
echo "$output"
|
||||||
|
fi
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
check_required_component_template_exists() {
|
||||||
|
local required
|
||||||
|
local missing
|
||||||
|
local file=$1
|
||||||
|
|
||||||
|
required=$(jq '[((.composed_of //[]) - (.ignore_missing_component_templates // []))[]]' "$file")
|
||||||
|
missing=$(jq -n --argjson required "$required" --argjson component_templates "$component_templates" '(($required) - ($component_templates))')
|
||||||
|
|
||||||
|
if [[ $(jq length <<<"$missing") -gt 0 ]]; then
|
||||||
|
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
}
|
||||||
|
|
||||||
|
check_heavynode_compatiable_index_template() {
|
||||||
|
# The only templates that are relevant to heavynodes are from datasets defined in elasticagent/files/elastic-agent.yml.jinja.
|
||||||
|
# Heavynodes do not have fleet server packages installed and do not support elastic agents reporting directly to them.
|
||||||
|
local -A heavynode_index_templates=(
|
||||||
|
["so-import"]=1
|
||||||
|
["so-syslog"]=1
|
||||||
|
["so-logs-soc"]=1
|
||||||
|
["so-suricata"]=1
|
||||||
|
["so-suricata.alerts"]=1
|
||||||
|
["so-zeek"]=1
|
||||||
|
["so-strelka"]=1
|
||||||
|
)
|
||||||
|
|
||||||
|
local template_name="$1"
|
||||||
|
|
||||||
|
if [[ ! -v heavynode_index_templates["$template_name"] ]]; then
|
||||||
|
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
load_component_templates() {
|
||||||
|
local printed_name="$1"
|
||||||
|
local pattern="${ELASTICSEARCH_TEMPLATES_DIR}/component/$2"
|
||||||
|
local append_mappings="${3:-"false"}"
|
||||||
|
|
||||||
|
echo -e "\nLoading $printed_name component templates...\n"
|
||||||
|
|
||||||
|
if ! compgen -G "${pattern}/*.json" > /dev/null; then
|
||||||
|
echo "No $printed_name component templates found in ${pattern}, skipping."
|
||||||
|
return
|
||||||
|
fi
|
||||||
|
|
||||||
|
for component in "$pattern"/*.json; do
|
||||||
|
tmpl_name=$(basename "${component%.json}")
|
||||||
|
|
||||||
|
if [[ "$append_mappings" == "true" ]]; then
|
||||||
|
# avoid duplicating "-mappings" if it already exists in the component template filename
|
||||||
|
tmpl_name="${tmpl_name%-mappings}-mappings"
|
||||||
|
fi
|
||||||
|
|
||||||
|
if ! load_template "_component_template/${tmpl_name}" "$component"; then
|
||||||
|
SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
|
||||||
|
SO_LOAD_FAILURES_NAMES+=("$component")
|
||||||
|
fi
|
||||||
|
done
|
||||||
|
}
|
||||||
|
|
||||||
|
check_elasticsearch_responsive() {
|
||||||
|
# Cannot load templates if Elasticsearch is not responding.
|
||||||
|
# NOTE: Slightly faster exit w/ failure than previous "retry 240 1" if there is a problem with Elasticsearch the
|
||||||
|
# script should exit sooner rather than hang at the 'so-elasticsearch-templates' salt state.
|
||||||
|
retry 3 15 "so-elasticsearch-query / --output /dev/null --fail" ||
|
||||||
|
fail "Elasticsearch is not responding. Please review Elasticsearch logs /opt/so/log/elasticsearch/securityonion.log for more details. Additionally, consider running so-elasticsearch-troubleshoot."
|
||||||
|
}
|
||||||
|
|
||||||
|
index_templates_exist() {
|
||||||
|
local templates_dir="$1"
|
||||||
|
|
||||||
|
if [[ ! -d "$templates_dir" ]]; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
compgen -G "${templates_dir}/*.json" > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
should_load_addon_templates() {
|
||||||
|
if [[ "$IS_HEAVYNODE" == "true" ]]; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
# Skip statefile checks when forcing template load
|
||||||
|
if [[ "$FORCE" != "true" ]]; then
|
||||||
|
if [[ ! -f "$SO_STATEFILE_SUCCESS" || -f "$ADDON_STATEFILE_SUCCESS" ]]; then
|
||||||
|
return 1
|
||||||
|
fi
|
||||||
|
fi
|
||||||
|
|
||||||
|
index_templates_exist "$ADDON_TEMPLATES_DIR"
|
||||||
|
}
|
||||||
|
|
||||||
+if [[ "$FORCE" == "true" || ! -f "$SO_STATEFILE_SUCCESS" ]] && index_templates_exist "$SO_TEMPLATES_DIR"; then
+    check_elasticsearch_responsive
+
+    if [[ "$IS_HEAVYNODE" == "false" ]]; then
+        # TODO: Better way to check if fleet server is installed vs checking for Elastic Defend component template.
+        fleet_check="logs-endpoint.alerts@package"
+        if ! so-elasticsearch-query "_component_template/$fleet_check" --output /dev/null --retry 5 --retry-delay 3 --fail; then
+            # This check prevents so-elasticsearch-templates-load from running before so-elastic-fleet-setup has run.
+            echo -e "\nPackage $fleet_check not yet installed. Fleet Server may not be fully configured yet."
+            # Fleet Server is required because some SO index templates depend on components installed via
+            # specific integrations, e.g. Elastic Defend. These are components that we do not manually create / manage
+            # via /opt/so/saltstack/salt/elasticsearch/templates/component/
+            exit 0
+        fi
+    fi
+
+    # load_component_templates "Name" "directory" "append '-mappings'?"
+    load_component_templates "ECS" "ecs" "true"
+    load_component_templates "Elastic Agent" "elastic-agent"
+    load_component_templates "Security Onion" "so"
+
+    component_templates=$(so-elasticsearch-component-templates-list)
+    echo -e "Loading Security Onion index templates...\n"
+    for so_idx_tmpl in "${SO_TEMPLATES_DIR}"/*.json; do
+        tmpl_name=$(basename "${so_idx_tmpl%-template.json}")
+
+        if [[ "$IS_HEAVYNODE" == "true" ]]; then
+            # TODO: Better way to load only heavynode-specific templates
+            if ! check_heavynode_compatiable_index_template "$tmpl_name"; then
+                if [[ "$VERBOSE" == "true" ]]; then
+                    echo "Skipping over $so_idx_tmpl; template is not a heavynode-specific index template."
+                fi
+                continue
+            fi
+        fi
+
+        if check_required_component_template_exists "$so_idx_tmpl"; then
+            if ! load_template "_index_template/$tmpl_name" "$so_idx_tmpl"; then
+                SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
+                SO_LOAD_FAILURES_NAMES+=("$so_idx_tmpl")
+            fi
+        else
+            echo "Skipping over $so_idx_tmpl due to missing required component template(s)."
+            SO_LOAD_FAILURES=$((SO_LOAD_FAILURES + 1))
+            SO_LOAD_FAILURES_NAMES+=("$so_idx_tmpl")
+            continue
+        fi
+    done
+
+    if [[ $SO_LOAD_FAILURES -eq 0 ]]; then
+        echo "All Security Onion core templates loaded successfully."
+        touch "$SO_STATEFILE_SUCCESS"
+    else
+        echo "Encountered $SO_LOAD_FAILURES failure(s) loading templates:"
+        for failed_template in "${SO_LOAD_FAILURES_NAMES[@]}"; do
+            echo " - $failed_template"
+        done
+        if [[ "$SHOULD_EXIT_ON_FAILURE" == "true" ]]; then
+            fail "Failed to load all Security Onion core templates successfully."
+        fi
+    fi
+elif ! index_templates_exist "$SO_TEMPLATES_DIR"; then
+    echo "No Security Onion core index templates found in ${SO_TEMPLATES_DIR}, skipping."
+elif [[ -f "$SO_STATEFILE_SUCCESS" ]]; then
+    echo "Security Onion core templates already loaded"
+fi
+
+# Start loading addon templates
+if should_load_addon_templates; then
+    check_elasticsearch_responsive
+
+    echo -e "\nLoading addon integration index templates...\n"
+    component_templates=$(so-elasticsearch-component-templates-list)
+
+    for addon_idx_tmpl in "${ADDON_TEMPLATES_DIR}"/*.json; do
+        tmpl_name=$(basename "${addon_idx_tmpl%-template.json}")
+
+        if check_required_component_template_exists "$addon_idx_tmpl"; then
+            if ! load_template "_index_template/${tmpl_name}" "$addon_idx_tmpl"; then
+                ADDON_LOAD_FAILURES=$((ADDON_LOAD_FAILURES + 1))
+                ADDON_LOAD_FAILURES_NAMES+=("$addon_idx_tmpl")
+            fi
+        else
+            echo "Skipping over $addon_idx_tmpl due to missing required component template(s)."
+            ADDON_LOAD_FAILURES=$((ADDON_LOAD_FAILURES + 1))
+            ADDON_LOAD_FAILURES_NAMES+=("$addon_idx_tmpl")
+            continue
+        fi
+    done
+
+    if [[ $ADDON_LOAD_FAILURES -eq 0 ]]; then
+        echo "All addon integration templates loaded successfully."
+        touch "$ADDON_STATEFILE_SUCCESS"
+    else
+        echo "Encountered $ADDON_LOAD_FAILURES failure(s) loading addon integration templates:"
+        for failed_template in "${ADDON_LOAD_FAILURES_NAMES[@]}"; do
+            echo " - $failed_template"
+        done
+        if [[ "$SHOULD_EXIT_ON_FAILURE" == "true" ]]; then
+            fail "Failed to load all addon integration templates successfully."
+        fi
+    fi
+
+elif [[ ! -f "$SO_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" ]]; then
+    echo "Skipping loading addon integration templates until Security Onion core templates have been loaded."
+
+elif [[ -f "$ADDON_STATEFILE_SUCCESS" && "$IS_HEAVYNODE" == "false" && "$FORCE" == "false" ]]; then
+    echo "Addon integration templates already loaded"
+fi
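# Aside: how tmpl_name is derived above. ${var%suffix} strips one trailing suffix
# before basename is applied, so an index template file maps onto its template name:
f=/opt/so/conf/elasticsearch/templates/index/so-zeek-template.json
basename "${f%-template.json}"   # prints: so-zeek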
@@ -7,6 +7,9 @@
 . /usr/sbin/so-common
 
 {%- from 'elasticsearch/template.map.jinja' import ES_INDEX_SETTINGS %}
+{%- if GLOBALS.role != "so-heavynode" %}
+{%- from 'elasticsearch/template.map.jinja' import ALL_ADDON_SETTINGS %}
+{%- endif %}
 
 {%- for index, settings in ES_INDEX_SETTINGS.items() %}
 {%- if settings.policy is defined %}

@@ -33,3 +36,13 @@
 {%- endif %}
 {%- endfor %}
 echo
+{%- if GLOBALS.role != "so-heavynode" %}
+{%- for index, settings in ALL_ADDON_SETTINGS.items() %}
+{%- if settings.policy is defined %}
+echo
+echo "Setting up {{ index }}-logs policy..."
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L -X PUT "https://localhost:9200/_ilm/policy/{{ index }}-logs" -H 'Content-Type: application/json' -d'{ "policy": {{ settings.policy | tojson(true) }} }'
+echo
+{%- endif %}
+{%- endfor %}
+{%- endif %}
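# Aside: for one hypothetical addon entry (index "so-logs-foo" carrying a simple
# delete-after-30d policy), the Jinja loop above would render to shell along these
# lines; the JSON body is whatever settings.policy | tojson(true) emits.
echo "Setting up so-logs-foo-logs policy..."
curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -s -k -L \
    -X PUT "https://localhost:9200/_ilm/policy/so-logs-foo-logs" \
    -H 'Content-Type: application/json' \
    -d'{ "policy": {"phases": {"delete": {"min_age": "30d", "actions": {"delete": {}}}}} }'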
@@ -1,165 +0,0 @@
-#!/bin/bash
-# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
-# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
-# https://securityonion.net/license; you may not use this file except in compliance with the
-# Elastic License 2.0.
-{%- import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
-{% from 'vars/globals.map.jinja' import GLOBALS %}
-
-STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt
-STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt
-
-if [[ -f $STATE_FILE_INITIAL ]]; then
-    # The initial template load has already run. As this is a subsequent load, all dependencies should
-    # already be satisified. Therefore, immediately exit/abort this script upon any template load failure
-    # since this is an unrecoverable failure.
-    should_exit_on_failure=1
-else
-    # This is the initial template load, and there likely are some components not yet setup in Elasticsearch.
-    # Therefore load as many templates as possible at this time and if an error occurs proceed to the next
-    # template. But if at least one template fails to load do not mark the templates as having been loaded.
-    # This will allow the next load to resume the load of the templates that failed to load initially.
-    should_exit_on_failure=0
-    echo "This is the initial template load"
-fi
-
-# If soup is running, ignore errors
-pgrep soup > /dev/null && should_exit_on_failure=0
-
-load_failures=0
-
-load_template() {
-    uri=$1
-    file=$2
-
-    echo "Loading template file $i"
-    if ! retry 3 1 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"; then
-        if [[ $should_exit_on_failure -eq 1 ]]; then
-            fail "Could not load template file: $file"
-        else
-            load_failures=$((load_failures+1))
-            echo "Incremented load failure counter: $load_failures"
-        fi
-    fi
-}
-
-if [ ! -f $STATE_FILE_SUCCESS ]; then
-    echo "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load."
-
-    . /usr/sbin/so-common
-
-{% if GLOBALS.role != 'so-heavynode' %}
-    if [ -f /usr/sbin/so-elastic-fleet-common ]; then
-        . /usr/sbin/so-elastic-fleet-common
-    fi
-{% endif %}
-
-    default_conf_dir=/opt/so/conf
-
-    # Define a default directory to load pipelines from
-    ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/"
-
-{% if GLOBALS.role == 'so-heavynode' %}
-    file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json"
-{% else %}
-    file="/usr/sbin/so-elastic-fleet-common"
-{% endif %}
-
-    if [ -f "$file" ]; then
-        # Wait for ElasticSearch to initialize
-        echo -n "Waiting for ElasticSearch..."
-        retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'"
-{% if GLOBALS.role != 'so-heavynode' %}
-        TEMPLATE="logs-endpoint.alerts@package"
-        INSTALLED=$(so-elasticsearch-query _component_template/$TEMPLATE | jq -r .component_templates[0].name)
-        if [ "$INSTALLED" != "$TEMPLATE" ]; then
-            echo
-            echo "Packages not yet installed."
-            echo
-            exit 0
-        fi
-{% endif %}
-
-        touch $STATE_FILE_INITIAL
-
-        cd ${ELASTICSEARCH_TEMPLATES}/component/ecs
-
-        echo "Loading ECS component templates..."
-        for i in *; do
-            TEMPLATE=$(echo $i | cut -d '.' -f1)
-            load_template "_component_template/${TEMPLATE}-mappings" "$i"
-        done
-        echo
-
-        cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent
-
-        echo "Loading Elastic Agent component templates..."
-{% if GLOBALS.role == 'so-heavynode' %}
-        component_pattern="so-*"
-{% else %}
-        component_pattern="*"
-{% endif %}
-        for i in $component_pattern; do
-            TEMPLATE=${i::-5}
-            load_template "_component_template/$TEMPLATE" "$i"
-        done
-        echo
-
-        # Load SO-specific component templates
-        cd ${ELASTICSEARCH_TEMPLATES}/component/so
-
-        echo "Loading Security Onion component templates..."
-        for i in *; do
-            TEMPLATE=$(echo $i | cut -d '.' -f1);
-            load_template "_component_template/$TEMPLATE" "$i"
-        done
-        echo
-
-        # Load SO index templates
-        cd ${ELASTICSEARCH_TEMPLATES}/index
-
-        echo "Loading Security Onion index templates..."
-        shopt -s extglob
-{% if GLOBALS.role == 'so-heavynode' %}
-        pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*|*endpoint*|*elasticsearch*|*generic*|*fleet_server*|*soc*)"
-{% else %}
-        pattern="*"
-{% endif %}
-        # Index templates will be skipped if the following conditions are met:
-        # 1. The template is part of the "so-logs-" template group
-        # 2. The template name does not correlate to at least one existing component template
-        # In this situation, the script will treat the skipped template as a temporary failure
-        # and allow the templates to be loaded again on the next run or highstate, whichever
-        # comes first.
-        COMPONENT_LIST=$(so-elasticsearch-component-templates-list)
-        for i in $pattern; do
-            TEMPLATE=${i::-14}
-            COMPONENT_PATTERN=${TEMPLATE:3}
-            MATCH=$(echo "$TEMPLATE" | grep -E "^so-logs-|^so-metrics" | grep -vE "detections|osquery")
-            if [[ -n "$MATCH" && ! "$COMPONENT_LIST" =~ "$COMPONENT_PATTERN" && ! "$COMPONENT_PATTERN" =~ \.generic|logs-winlog\.winlog ]]; then
-                load_failures=$((load_failures+1))
-                echo "Component template does not exist for $COMPONENT_PATTERN. The index template will not be loaded. Load failures: $load_failures"
-            else
-                load_template "_index_template/$TEMPLATE" "$i"
-            fi
-        done
-    else
-{% if GLOBALS.role == 'so-heavynode' %}
-        echo "Common template does not exist. Exiting..."
-{% else %}
-        echo "Elastic Fleet not configured. Exiting..."
-{% endif %}
-        exit 0
-    fi
-
-    cd - >/dev/null
-
-    if [[ $load_failures -eq 0 ]]; then
-        echo "All templates loaded successfully"
-        touch $STATE_FILE_SUCCESS
-    else
-        echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate"
-    fi
-else
-    echo "Templates already loaded"
-fi
@@ -11,6 +11,7 @@
   'so-kratos',
   'so-hydra',
   'so-nginx',
+  'so-postgres',
   'so-redis',
   'so-soc',
   'so-strelka-coordinator',

@@ -34,6 +35,7 @@
   'so-hydra',
   'so-logstash',
   'so-nginx',
+  'so-postgres',
   'so-redis',
   'so-soc',
   'so-strelka-coordinator',

@@ -77,6 +79,7 @@
   'so-kratos',
   'so-hydra',
   'so-nginx',
+  'so-postgres',
   'so-soc'
 ] %}
 
@@ -98,6 +98,10 @@ firewall:
     tcp:
       - 8086
     udp: []
+  postgres:
+    tcp:
+      - 5432
+    udp: []
   kafka_controller:
     tcp:
       - 9093

@@ -193,6 +197,7 @@ firewall:
       - kibana
       - redis
      - influxdb
+      - postgres
       - elasticsearch_rest
       - elasticsearch_node
       - localrules

@@ -379,6 +384,7 @@ firewall:
       - kibana
       - redis
       - influxdb
+      - postgres
       - elasticsearch_rest
       - elasticsearch_node
       - docker_registry

@@ -392,6 +398,7 @@ firewall:
       - elasticsearch_rest
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -404,6 +411,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -421,6 +429,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
   searchnode:
     portgroups:

@@ -431,6 +440,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -444,6 +454,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -453,6 +464,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -486,6 +498,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - elastic_agent_control

@@ -496,6 +509,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -590,6 +604,7 @@ firewall:
       - kibana
       - redis
       - influxdb
+      - postgres
       - elasticsearch_rest
       - elasticsearch_node
       - docker_registry

@@ -603,6 +618,7 @@ firewall:
       - elasticsearch_rest
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -615,6 +631,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -632,6 +649,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
   searchnode:
     portgroups:

@@ -642,6 +660,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -655,6 +674,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -664,6 +684,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -695,6 +716,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - elastic_agent_control

@@ -705,6 +727,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -799,6 +822,7 @@ firewall:
       - kibana
       - redis
       - influxdb
+      - postgres
       - elasticsearch_rest
       - elasticsearch_node
       - docker_registry

@@ -812,6 +836,7 @@ firewall:
       - elasticsearch_rest
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -824,6 +849,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -841,6 +867,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
   searchnode:
     portgroups:

@@ -850,6 +877,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -862,6 +890,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -871,6 +900,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -904,6 +934,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - elastic_agent_control

@@ -914,6 +945,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -1011,6 +1043,7 @@ firewall:
       - kibana
       - redis
       - influxdb
+      - postgres
       - elasticsearch_rest
       - elasticsearch_node
       - docker_registry

@@ -1031,6 +1064,7 @@ firewall:
       - elasticsearch_rest
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -1043,6 +1077,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -1054,6 +1089,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - beats_5044

@@ -1065,6 +1101,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - redis

@@ -1074,6 +1111,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - redis

@@ -1084,6 +1122,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -1120,6 +1159,7 @@ firewall:
     portgroups:
       - docker_registry
       - influxdb
+      - postgres
       - sensoroni
       - yum
       - elastic_agent_control

@@ -1130,6 +1170,7 @@ firewall:
       - yum
       - docker_registry
       - influxdb
+      - postgres
       - elastic_agent_control
       - elastic_agent_data
       - elastic_agent_update

@@ -1473,6 +1514,7 @@ firewall:
       - kibana
       - redis
       - influxdb
+      - postgres
       - elasticsearch_rest
       - elasticsearch_node
       - elastic_agent_control
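# Aside: one way to spot-check that the new portgroup took effect after a
# highstate (the exact pillar path is an assumption; adjust it to the grid's
# actual firewall pillar layout):
salt-call pillar.get firewall:portgroups:postgres
sudo iptables -nL | grep 5432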
@@ -11,18 +11,14 @@ global:
     regexFailureMessage: You must enter a valid IP address or CIDR.
   mdengine:
     description: Which engine to use for meta data generation. Options are ZEEK and SURICATA.
-    regex: ^(ZEEK|SURICATA)$
     options:
       - ZEEK
       - SURICATA
-    regexFailureMessage: You must enter either ZEEK or SURICATA.
     global: True
   pcapengine:
     description: Which engine to use for generating pcap. Currently only SURICATA is supported.
-    regex: ^(SURICATA)$
     options:
       - SURICATA
-    regexFailureMessage: You must enter either SURICATA.
     global: True
   ids:
     description: Which IDS engine to use. Currently only Suricata is supported.

@@ -42,11 +38,9 @@ global:
     advanced: True
   pipeline:
     description: Sets which pipeline technology for events to use. The use of Kafka requires a Security Onion Pro license.
-    regex: ^(REDIS|KAFKA)$
     options:
       - REDIS
       - KAFKA
-    regexFailureMessage: You must enter either REDIS or KAFKA.
     global: True
     advanced: True
   repo_host:
@@ -20,7 +20,7 @@ so-idh:
     - network_mode: host
     - binds:
       - /nsm/idh:/var/tmp:rw
-      - /opt/so/conf/idh/http-skins:/usr/local/lib/python3.12/site-packages/opencanary/modules/data/http/skin:ro
+      - /opt/so/conf/idh/http-skins:/opt/opencanary/http-skins:ro
       - /opt/so/conf/idh/opencanary.conf:/etc/opencanaryd/opencanary.conf:ro
 {% if DOCKERMERGED.containers['so-idh'].custom_bind_mounts %}
 {% for BIND in DOCKERMERGED.containers['so-idh'].custom_bind_mounts %}
@@ -28,6 +28,7 @@
 {% set HTTPPROXYSKINLIST = OPENCANARYCONFIG.pop('httpproxy_x_skinlist') %}
 {% do OPENCANARYCONFIG.update({'http_x_skin_x_list': HTTPSKINLIST}) %}
 {% do OPENCANARYCONFIG.update({'httpproxy_x_skin_x_list': HTTPPROXYSKINLIST}) %}
+{% do OPENCANARYCONFIG.update({'http_x_skindir': '/opt/opencanary/http-skins/' ~ OPENCANARYCONFIG['http_x_skin']}) %}
 
 {% set OPENSSH = salt['pillar.get']('idh:openssh', default=IDHCONFIG.idh.openssh, merge=True) %}
 
@@ -0,0 +1,29 @@
+<html>
+  <head>
+    <title>Redirect</title>
+    <style>
+      body {
+        width: 100%;
+      }
+      .outer {
+        margin-left: auto;
+        margin-right: auto;
+        width: 25em;
+        height: 100%;
+      }
+      .inner{
+        display: table-cell;
+        vertical-align: middle;
+        height: 30em;
+      }
+    </style>
+  </head>
+  <body>
+    <div class='outer'>
+      <div class='inner'>
+        <a href="/index">Click here</a>
+      </div>
+    </div>
+  </body>
+</html>
+
@@ -0,0 +1,29 @@
+<html>
+  <head>
+    <title>Redirect</title>
+    <style>
+      body {
+        width: 100%;
+      }
+      .outer {
+        margin-left: auto;
+        margin-right: auto;
+        width: 25em;
+        height: 100%;
+      }
+      .inner{
+        display: table-cell;
+        vertical-align: middle;
+        height: 30em;
+      }
+    </style>
+  </head>
+  <body>
+    <div class='outer'>
+      <div class='inner'>
+        <a href="/index">Click here</a>
+      </div>
+    </div>
+  </body>
+</html>
+
@@ -85,7 +85,10 @@ influxdb:
     description: The log level to use for outputting log statements. Allowed values are debug, info, or error.
     global: True
     advanced: false
-    regex: ^(info|debug|error)$
+    options:
+      - info
+      - debug
+      - error
     helpLink: influxdb
   metrics-disabled:
     description: If true, the HTTP endpoint that exposes internal InfluxDB metrics will be inaccessible.

@@ -140,7 +143,9 @@ influxdb:
     description: Determines the type of storage used for secrets. Allowed values are bolt or vault.
     global: True
     advanced: True
-    regex: ^(bolt|vault)$
+    options:
+      - bolt
+      - vault
     helpLink: influxdb
   session-length:
     description: Number of minutes that a user login session can remain authenticated.

@@ -260,7 +265,9 @@ influxdb:
     description: The type of data store to use for HTTP resources. Allowed values are disk or memory. Memory should not be used for production Security Onion installations.
     global: True
     advanced: True
-    regex: ^(disk|memory)$
+    options:
+      - disk
+      - memory
     helpLink: influxdb
   tls-cert:
     description: The container path to the certificate to use for TLS encryption of the HTTP requests and responses.
@@ -131,7 +131,10 @@ kafka:
   ssl_x_keystore_x_type:
     description: The key store file format.
     title: ssl.keystore.type
-    regex: ^(JKS|PKCS12|PEM)$
+    options:
+      - JKS
+      - PKCS12
+      - PEM
     helpLink: kafka
   ssl_x_truststore_x_location:
     description: The trust store file location within the Docker container.

@@ -160,7 +163,11 @@ kafka:
   security_x_protocol:
     description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
     title: security.protocol
-    regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)
+    options:
+      - SASL_SSL
+      - PLAINTEXT
+      - SSL
+      - SASL_PLAINTEXT
     helpLink: kafka
   ssl_x_keystore_x_location:
     description: The key store file location within the Docker container.

@@ -174,7 +181,10 @@ kafka:
   ssl_x_keystore_x_type:
     description: The key store file format.
     title: ssl.keystore.type
-    regex: ^(JKS|PKCS12|PEM)$
+    options:
+      - JKS
+      - PKCS12
+      - PEM
     helpLink: kafka
   ssl_x_truststore_x_location:
     description: The trust store file location within the Docker container.
@@ -22,7 +22,7 @@ kibana:
       - default
       - file
   migrations:
-    discardCorruptObjects: "8.18.8"
+    discardCorruptObjects: "9.3.3"
   telemetry:
     enabled: False
   xpack:
@@ -9,5 +9,5 @@ SESSIONCOOKIE=$(curl -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http:
 # Disable certain Features from showing up in the Kibana UI
 echo
 echo "Setting up default Kibana Space:"
-curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","entityManager"]} ' >> /opt/so/log/kibana/misc.log
+curl -K /opt/so/conf/elasticsearch/curl.config -b "sid=$SESSIONCOOKIE" -L -X PUT "localhost:5601/api/spaces/space/default" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d' {"id":"default","name":"Default","disabledFeatures":["ml","enterpriseSearch","logs","infrastructure","apm","uptime","monitoring","stackAlerts","actions","securitySolutionCasesV3","inventory","dataQuality","searchSynonyms","searchQueryRules","enterpriseSearchApplications","enterpriseSearchAnalytics","securitySolutionTimeline","securitySolutionNotes","securitySolutionRulesV1","entityManager","streams","cloudConnect","slo"]} ' >> /opt/so/log/kibana/misc.log
 echo
@@ -3,8 +3,8 @@ kratos:
     description: Enables or disables the Kratos authentication system. WARNING - Disabling this process will cause the grid to malfunction. Re-enabling this setting will require manual effort via SSH.
     forcedType: bool
     advanced: True
+    readonly: True
     helpLink: kratos
-
   oidc:
     enabled:
       description: Set to True to enable OIDC / Single Sign-On (SSO) to SOC. Requires a valid Security Onion license key.

@@ -21,8 +21,12 @@ kratos:
       description: "Specify the provider type. Required. Valid values are: auth0, generic, github, google, microsoft"
       global: True
       forcedType: string
-      regex: "auth0|generic|github|google|microsoft"
-      regexFailureMessage: "Valid values are: auth0, generic, github, google, microsoft"
+      options:
+        - auth0
+        - generic
+        - github
+        - google
+        - microsoft
       helpLink: oidc
     client_id:
       description: Specify the client ID, also referenced as the application ID. Required.

@@ -43,8 +47,9 @@ kratos:
       description: The source of the subject identifier. Typically 'userinfo'. Only used when provider is 'microsoft'.
       global: True
       forcedType: string
-      regex: me|userinfo
-      regexFailureMessage: "Valid values are: me, userinfo"
+      options:
+        - me
+        - userinfo
       helpLink: oidc
     auth_url:
      description: Provider's auth URL. Required when provider is 'generic'.
@@ -133,7 +133,7 @@ function getinstallinfo() {
         return 1
     fi
 
-    export $(echo "$INSTALLVARS" | xargs)
+    while read -r var; do export "$var"; done <<< "$INSTALLVARS"
     if [ $? -ne 0 ]; then
         log "ERROR" "Failed to source install variables"
         return 1
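# Aside: why the export line changed. Word-splitting an unquoted $(... | xargs)
# breaks any value containing spaces, while the while-read loop exports each
# NAME=VALUE line intact. A small demonstration with illustrative values:
INSTALLVARS=$'TITLE=Security Onion\nROLE=manager'
while read -r var; do export "$var"; done <<< "$INSTALLVARS"
echo "$TITLE"   # prints: Security Onion (the old form would split this into
                # TITLE=Security plus a stray word "Onion")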
@@ -281,6 +281,39 @@ function deleteMinionFiles () {
     fi
 }
 
+# Remove this minion's postgres Telegraf credential from the shared creds
+# pillar and drop the matching role in Postgres. Always returns 0 so a dead
+# or unreachable so-postgres doesn't block minion deletion; in that case we
+# log a warning and leave the role behind for manual cleanup.
+function remove_postgres_telegraf_from_minion() {
+    local MINION_SAFE
+    MINION_SAFE=$(echo "$MINION_ID" | tr '.-' '__' | tr '[:upper:]' '[:lower:]')
+    local PG_USER="so_telegraf_${MINION_SAFE}"
+
+    log "INFO" "Removing postgres telegraf cred for $MINION_ID"
+
+    so-telegraf-cred remove "$MINION_ID" >/dev/null 2>&1 || true
+
+    if docker ps --format '{{.Names}}' 2>/dev/null | grep -q '^so-postgres$'; then
+        if ! docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d so_telegraf >/dev/null 2>&1 <<EOSQL
+DO \$\$
+BEGIN
+    IF EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '$PG_USER') THEN
+        EXECUTE format('REASSIGN OWNED BY %I TO so_telegraf', '$PG_USER');
+        EXECUTE format('DROP OWNED BY %I', '$PG_USER');
+        EXECUTE format('DROP ROLE %I', '$PG_USER');
+    END IF;
+END
+\$\$;
+EOSQL
+        then
+            log "WARN" "Failed to drop postgres role $PG_USER; the pillar entry was removed, so drop the role manually if it persists"
+        fi
+    else
+        log "WARN" "so-postgres container is not running; skipping DB role cleanup for $PG_USER"
+    fi
+}
+
 # Create the minion file
 function ensure_socore_ownership() {
     log "INFO" "Setting socore ownership on minion files"
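# Aside: the DO block above leans on format('%I') so the role name is quoted as a
# SQL identifier even though it arrives via shell interpolation. A trimmed,
# standalone equivalent (role name assumed); quoting the heredoc delimiter means
# the $$ needs no backslash escaping here:
docker exec -i so-postgres psql -U postgres -d so_telegraf <<'EOSQL'
DO $$
BEGIN
    IF EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'so_telegraf_demo') THEN
        EXECUTE format('DROP ROLE %I', 'so_telegraf_demo');
    END IF;
END
$$;
EOSQL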
@@ -542,6 +575,17 @@ function add_telegraf_to_minion() {
         log "ERROR" "Failed to add telegraf configuration to $PILLARFILE"
         return 1
     fi
+
+    # Provision the per-minion postgres Telegraf credential in the shared
+    # telegraf/creds.sls pillar. so-telegraf-cred is the only writer; it
+    # generates a password on first add and is a no-op on re-add so the cred
+    # is stable across repeated so-minion runs. postgres.telegraf_users on the
+    # manager creates/updates the DB role from the same pillar.
+    so-telegraf-cred add "$MINION_ID"
+    if [ $? -ne 0 ]; then
+        log "ERROR" "Failed to provision postgres telegraf cred for $MINION_ID"
+        return 1
+    fi
 }
 
 function add_influxdb_to_minion() {
@@ -1069,6 +1113,7 @@ case "$OPERATION" in
 
   "delete")
     log "INFO" "Removing minion $MINION_ID"
+    remove_postgres_telegraf_from_minion
     deleteMinionFiles || {
       log "ERROR" "Failed to delete minion files for $MINION_ID"
       exit 1

Executable +54
@@ -0,0 +1,54 @@
+#!/bin/bash
+
+# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
+# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
+# https://securityonion.net/license; you may not use this file except in compliance with the
+# Elastic License 2.0.
+
+# Single writer for the Telegraf Postgres credentials pillar. Thin wrapper
+# around so-yaml.py that generates a password on first add and no-ops on
+# re-add so the cred is stable across repeated so-minion runs.
+#
+# Note: so-yaml.py splits keys on '.' with no escape. SO minion ids are
+# dot-free by construction (setup/so-functions:1884 takes the short_name
+# before the first '.'), so using the raw minion id as the key is safe.
+
+CREDS=/opt/so/saltstack/local/pillar/telegraf/creds.sls
+
+usage() {
+    echo "Usage: $0 <add|remove> <minion_id>" >&2
+    exit 2
+}
+
+seed_creds_file() {
+    mkdir -p "$(dirname "$CREDS")" || return 1
+    if [[ ! -f "$CREDS" ]]; then
+        (umask 027 && printf 'telegraf:\n postgres_creds: {}\n' > "$CREDS") || return 1
+        chown socore:socore "$CREDS" 2>/dev/null || true
+        chmod 640 "$CREDS" || return 1
+    fi
+}
+
+OP=$1
+MID=$2
+[[ -z "$OP" || -z "$MID" ]] && usage
+
+case "$OP" in
+    add)
+        SAFE=$(echo "$MID" | tr '.-' '__' | tr '[:upper:]' '[:lower:]')
+        seed_creds_file || exit 1
+        if so-yaml.py get -r "$CREDS" "telegraf.postgres_creds.${MID}.user" >/dev/null 2>&1; then
+            exit 0
+        fi
+        PASS=$(tr -dc 'A-Za-z0-9~!@#^&*()_=+[]|;:,.<>?-' < /dev/urandom | head -c 72)
+        so-yaml.py replace "$CREDS" "telegraf.postgres_creds.${MID}.user" "so_telegraf_${SAFE}" >/dev/null
+        so-yaml.py replace "$CREDS" "telegraf.postgres_creds.${MID}.pass" "$PASS" >/dev/null
+        ;;
+    remove)
+        [[ -f "$CREDS" ]] || exit 0
+        so-yaml.py remove "$CREDS" "telegraf.postgres_creds.${MID}" >/dev/null 2>&1 || true
+        ;;
+    *)
+        usage
+        ;;
+esac
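# Aside: expected usage of the new helper and the pillar shape it maintains
# (minion id and generated values illustrative):
#   so-telegraf-cred add sensor01
#   so-telegraf-cred remove sensor01
# After "add", /opt/so/saltstack/local/pillar/telegraf/creds.sls resembles:
#   telegraf:
#    postgres_creds:
#     sensor01:
#      user: so_telegraf_sensor01
#      pass: <72 random characters>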
@@ -39,9 +39,16 @@ def showUsage(args):
 
 
 def loadYaml(filename):
-    file = open(filename, "r")
+    try:
+        with open(filename, "r") as file:
             content = file.read()
             return yaml.safe_load(content)
+    except FileNotFoundError:
+        print(f"File not found: {filename}", file=sys.stderr)
+        sys.exit(1)
+    except Exception as e:
+        print(f"Error reading file {filename}: {e}", file=sys.stderr)
+        sys.exit(1)
 
 
 def writeYaml(filename, content):
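# Aside: the so-yaml.py calls the new scripts rely on, exercised against a scratch
# file; "get -r" prints the raw scalar and its exit status doubles as a presence
# test, which is exactly how ensure_postgres_secret and so-telegraf-cred use it.
f=$(mktemp)
printf 'secrets:\n  import_pass: abc\n' > "$f"
so-yaml.py get -r "$f" secrets.postgres_pass >/dev/null 2>&1 || echo "postgres_pass missing"
so-yaml.py add "$f" secrets.postgres_pass "s3cret"
so-yaml.py get -r "$f" secrets.postgres_pass   # prints: s3cret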
@@ -285,6 +292,7 @@ def add(args):
 def removeKey(content, key):
     pieces = key.split(".", 1)
     if len(pieces) > 1:
+        if pieces[0] in content:
             removeKey(content[pieces[0]], pieces[1])
     else:
         content.pop(key, None)
@@ -973,3 +973,21 @@ class TestReplaceListObject(unittest.TestCase):
 
         expected = "key1:\n- id: '1'\n  status: updated\n- id: '2'\n  status: inactive\n"
         self.assertEqual(actual, expected)
+
+
+class TestLoadYaml(unittest.TestCase):
+
+    def test_load_yaml_missing_file(self):
+        with patch('sys.exit', new=MagicMock()) as sysmock:
+            with patch('sys.stderr', new=StringIO()) as mock_stderr:
+                soyaml.loadYaml("/tmp/so-yaml_test-does-not-exist.yaml")
+        sysmock.assert_called_with(1)
+        self.assertIn("File not found:", mock_stderr.getvalue())
+
+    def test_load_yaml_read_error(self):
+        with patch('sys.exit', new=MagicMock()) as sysmock:
+            with patch('sys.stderr', new=StringIO()) as mock_stderr:
+                with patch('builtins.open', side_effect=PermissionError("denied")):
+                    soyaml.loadYaml("/tmp/so-yaml_test-unreadable.yaml")
+        sysmock.assert_called_with(1)
+        self.assertIn("Error reading file", mock_stderr.getvalue())

+119 -45
@@ -24,6 +24,14 @@ BACKUPTOPFILE=/opt/so/saltstack/default/salt/top.sls.backup
 SALTUPGRADED=false
 SALT_CLOUD_INSTALLED=false
 SALT_CLOUD_CONFIGURED=false
+# Check if salt-cloud is installed
+if rpm -q salt-cloud &>/dev/null; then
+    SALT_CLOUD_INSTALLED=true
+fi
+# Check if salt-cloud is configured
+if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
+    SALT_CLOUD_CONFIGURED=true
+fi
 # used to display messages to the user at the end of soup
 declare -a FINAL_MESSAGE_QUEUE=()
 
@@ -305,7 +313,7 @@ clone_to_tmp() {
     # Make a temp location for the files
     mkdir -p /tmp/sogh
     cd /tmp/sogh
-    SOUP_BRANCH="-b 2.4/main"
+    SOUP_BRANCH="-b 3/main"
     if [ -n "$BRANCH" ]; then
         SOUP_BRANCH="-b $BRANCH"
     fi
@@ -363,6 +371,7 @@ preupgrade_changes() {
     echo "Checking to see if changes are needed."
 
     [[ "$INSTALLEDVERSION" =~ ^2\.4\.21[0-9]+$ ]] && up_to_3.0.0
+    [[ "$INSTALLEDVERSION" == "3.0.0" ]] && up_to_3.1.0
     true
 }
 

@@ -371,6 +380,7 @@ postupgrade_changes() {
     echo "Running post upgrade processes."
 
     [[ "$POSTVERSION" =~ ^2\.4\.21[0-9]+$ ]] && post_to_3.0.0
+    [[ "$POSTVERSION" == "3.0.0" ]] && post_to_3.1.0
     true
 }
 
@@ -445,7 +455,6 @@ migrate_pcap_to_suricata() {
 }
 
 up_to_3.0.0() {
-    determine_elastic_agent_upgrade
     migrate_pcap_to_suricata
 
     INSTALLEDVERSION=3.0.0
@@ -469,6 +478,87 @@ post_to_3.0.0() {
 
 ### 3.0.0 End ###
 
+### 3.1.0 Scripts ###
+
+elasticsearch_backup_index_templates() {
+    echo "Backing up current elasticsearch index templates in /opt/so/conf/elasticsearch/templates/index/ to /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz"
+    tar -czf /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz -C /opt/so/conf/elasticsearch/templates/index/ .
+}
+
+ensure_postgres_local_pillar() {
+    # Postgres was added as a service after 3.0.0, so the new pillar/top.sls
+    # references postgres.soc_postgres / postgres.adv_postgres unconditionally.
+    # Managers upgrading from 3.0.0 have no /opt/so/saltstack/local/pillar/postgres/
+    # (make_some_dirs only runs at install time), so the stubs must be created
+    # here before salt-master restarts against the new top.sls.
+    echo "Ensuring postgres local pillar stubs exist."
+    local dir=/opt/so/saltstack/local/pillar/postgres
+    mkdir -p "$dir"
+    [[ -f "$dir/soc_postgres.sls" ]] || touch "$dir/soc_postgres.sls"
+    [[ -f "$dir/adv_postgres.sls" ]] || touch "$dir/adv_postgres.sls"
+    chown -R socore:socore "$dir"
+}
+
+ensure_postgres_secret() {
+    # On a fresh install, generate_passwords + secrets_pillar seed
+    # secrets:postgres_pass in /opt/so/saltstack/local/pillar/secrets.sls. That
+    # code path is skipped on upgrade (secrets.sls already exists from 3.0.0
+    # with import_pass/influx_pass but no postgres_pass), so the postgres
+    # container's POSTGRES_PASSWORD_FILE and SOC's PG_ADMIN_PASS would be empty
+    # after highstate. Generate one now if missing.
+    local secrets_file=/opt/so/saltstack/local/pillar/secrets.sls
+    if [[ ! -f "$secrets_file" ]]; then
+        echo "WARNING: $secrets_file missing; skipping postgres_pass backfill."
+        return 0
+    fi
+    if so-yaml.py get -r "$secrets_file" secrets.postgres_pass >/dev/null 2>&1; then
+        echo "secrets.postgres_pass already set; leaving as-is."
+        return 0
+    fi
+    echo "Seeding secrets.postgres_pass in $secrets_file."
+    so-yaml.py add "$secrets_file" secrets.postgres_pass "$(get_random_value)"
+    chown socore:socore "$secrets_file"
+}
+
+up_to_3.1.0() {
+    ensure_postgres_local_pillar
+    ensure_postgres_secret
+    determine_elastic_agent_upgrade
+    elasticsearch_backup_index_templates
+    # Clear existing component template state file.
+    rm -f /opt/so/state/esfleet_component_templates.json
+
+    INSTALLEDVERSION=3.1.0
+}
+
+post_to_3.1.0() {
+    /usr/sbin/so-kibana-space-defaults
+    # ensure manager has new version of socloud.conf
+    if [[ $SALT_CLOUD_CONFIGURED == true ]]; then
+        salt-call state.apply salt.cloud.config concurrent=True
+    fi
+
+    # Backfill the Telegraf creds pillar for every accepted minion. so-telegraf-cred
+    # add is idempotent (it no-ops when an entry already exists), so this is safe
+    # to run on every soup. The subsequent state.apply creates/updates the matching
+    # Postgres roles from the reconciled pillar.
+    echo "Reconciling Telegraf Postgres creds for accepted minions."
+    for mid in $(salt-key --out=json --list=accepted 2>/dev/null | jq -r '.minions[]?' 2>/dev/null); do
+        [[ -n "$mid" ]] || continue
+        /usr/sbin/so-telegraf-cred add "$mid" || echo "  warning: so-telegraf-cred add $mid failed" >&2
+    done
+    # Run through the master (not --local) so state compilation uses the
+    # master's configured file_roots; the manager's /etc/salt/minion has no
+    # file_roots of its own and --local would fail with "No matching sls found".
+    salt-call state.apply postgres.telegraf_users queue=True || true
+
+    POSTVERSION=3.1.0
+}
+
+### 3.1.0 End ###
+
+
 repo_sync() {
     echo "Sync the local repo."
     su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync."
@@ -636,15 +726,6 @@ upgrade_check_salt() {
 upgrade_salt() {
     echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION."
     echo ""
-    # Check if salt-cloud is installed
-    if rpm -q salt-cloud &>/dev/null; then
-        SALT_CLOUD_INSTALLED=true
-    fi
-    # Check if salt-cloud is configured
-    if [[ -f /etc/salt/cloud.profiles.d/socloud.conf ]]; then
-        SALT_CLOUD_CONFIGURED=true
-    fi
-
     echo "Removing yum versionlock for Salt."
     echo ""
     yum versionlock delete "salt"
@@ -728,12 +809,12 @@ verify_es_version_compatibility() {
     local is_active_intermediate_upgrade=1
     # supported upgrade paths for SO-ES versions
     declare -A es_upgrade_map=(
-        ["8.18.8"]="9.0.8"
+        ["9.0.8"]="9.3.3"
     )
 
     # Elasticsearch MUST upgrade through these versions
     declare -A es_to_so_version=(
-        ["8.18.8"]="2.4.190-20251024"
+        ["9.0.8"]="3.0.0-20260331"
     )
 
     # Get current Elasticsearch version
@@ -745,26 +826,17 @@ verify_es_version_compatibility() {
    exit 160
  fi

-  if ! target_es_version_raw=$(so-yaml.py get $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
+  if ! target_es_version=$(so-yaml.py get -r $UPDATE_DIR/salt/elasticsearch/defaults.yaml elasticsearch.version); then
-    # so-yaml.py failed to get the ES version from upgrade versions elasticsearch/defaults.yaml file. Likely they are upgrading to an SO version older than 2.4.110 prior to the ES version pinning and should be OKAY to continue with the upgrade.
-
-    # if so-yaml.py failed to get the ES version AND the version we are upgrading to is newer than 2.4.110 then we should bail
-    if [[ $(cat $UPDATE_DIR/VERSION | cut -d'.' -f3) > 110 ]]; then
      echo "Couldn't determine the target Elasticsearch version (post soup version) to ensure compatibility with current Elasticsearch version. Exiting"

      exit 160
    fi
-
-    # allow upgrade to version < 2.4.110 without checking ES version compatibility
-    return 0
-  else
-    target_es_version=$(sed -n '1p' <<< "$target_es_version_raw")
-  fi

  for statefile in "${es_required_version_statefile_base}"-*; do
    [[ -f $statefile ]] || continue

-    local es_required_version_statefile_value=$(cat "$statefile")
+    local es_required_version_statefile_value
+    es_required_version_statefile_value=$(cat "$statefile")

    if [[ "$es_required_version_statefile_value" == "$target_es_version" ]]; then
      echo "Intermediate upgrade to ES $target_es_version is in progress. Skipping Elasticsearch version compatibility check."

@@ -773,7 +845,7 @@ verify_es_version_compatibility() {
    fi

    # use sort to check if es_required_statefile_value is < the current es_version.
-    if [[ "$(printf '%s\n' $es_required_version_statefile_value $es_version | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
+    if [[ "$(printf '%s\n' "$es_required_version_statefile_value" "$es_version" | sort -V | head -n1)" == "$es_required_version_statefile_value" ]]; then
      rm -f "$statefile"
      continue
    fi
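The sort -V test above is the portable bash idiom for "is version A no newer than version B": print both, version-sort, and check which one comes out first. A quick illustration of why it is used instead of plain string comparison:

a=9.0.8; b=9.3.3
# Lexical comparison gets multi-digit components wrong: "9.10.0" sorts
# before "9.3.3" as a string even though 9.10 is the newer release.
if [[ "$(printf '%s\n' "$a" "$b" | sort -V | head -n1)" == "$a" ]]; then
  echo "$a is not newer than $b"
fi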
@@ -784,8 +856,7 @@ verify_es_version_compatibility() {

    echo -e "\n##############################################################################################################################\n"
    echo "A previously required intermediate Elasticsearch upgrade was detected. Verifying that all Searchnodes/Heavynodes have successfully upgraded Elasticsearch to $es_required_version_statefile_value before proceeding with soup to avoid potential data loss! This command can take up to an hour to complete."
-    timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"
-    if [[ $? -ne 0 ]]; then
+    if ! timeout --foreground 4000 bash "$es_verification_script" "$es_required_version_statefile_value" "$statefile"; then
      echo -e "\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n"

      echo "A previous required intermediate Elasticsearch upgrade to $es_required_version_statefile_value has yet to successfully complete across the grid. Please allow time for all Searchnodes/Heavynodes to have upgraded Elasticsearch to $es_required_version_statefile_value before running soup again to avoid potential data loss!"

@@ -802,6 +873,7 @@ verify_es_version_compatibility() {
    return 0
  fi

+  # shellcheck disable=SC2076 # Do not want a regex here eg usage " 8.18.8 9.0.8 " =~ " 9.0.8 "
  if [[ " ${es_upgrade_map[$es_version]} " =~ " $target_es_version " || "$es_version" == "$target_es_version" ]]; then
    # supported upgrade
    return 0
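The padded "=~" test above is whitespace-delimited set membership, not regex matching; quoting the right-hand side of "=~" forces a literal substring match, which is exactly why the SC2076 suppression is deliberate. A small sketch of the same idiom:

declare -A es_upgrade_map=( ["9.0.8"]="9.3.3" )
es_version="9.0.8"; target="9.3.3"
# Spaces padded onto both sides mean " 9.3.3 " can only match a whole
# element of the space-separated list, never a partial version string.
if [[ " ${es_upgrade_map[$es_version]} " =~ " $target " ]]; then
  echo "direct upgrade $es_version -> $target is supported"
fi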
@@ -810,7 +882,7 @@ verify_es_version_compatibility() {
  if [[ -z "$compatible_versions" ]]; then
    # If current ES version is not explicitly defined in the upgrade map, we know they have an intermediate upgrade to do.
    # We default to the lowest ES version defined in es_to_so_version as $first_es_required_version
-    local first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
+    first_es_required_version=$(printf '%s\n' "${!es_to_so_version[@]}" | sort -V | head -n1)
    next_step_so_version=${es_to_so_version[$first_es_required_version]}
    required_es_upgrade_version="$first_es_required_version"
  else

@@ -829,7 +901,7 @@ verify_es_version_compatibility() {
  if [[ $is_airgap -eq 0 ]]; then
    run_airgap_intermediate_upgrade
  else
-    if [[ ! -z $ISOLOC ]]; then
+    if [[ -n $ISOLOC ]]; then
      originally_requested_iso_location="$ISOLOC"
    fi
    # Make sure ISOLOC is not set. Network installs that used soup -f would have ISOLOC set.

@@ -861,7 +933,8 @@ wait_for_salt_minion_with_restart() {
}

run_airgap_intermediate_upgrade() {
-  local originally_requested_so_version=$(cat $UPDATE_DIR/VERSION)
+  local originally_requested_so_version
+  originally_requested_so_version=$(cat "$UPDATE_DIR/VERSION")
  # preserve ISOLOC value, so we can try to use it post intermediate upgrade
  local originally_requested_iso_location="$ISOLOC"

@@ -873,7 +946,8 @@ run_airgap_intermediate_upgrade() {

  while [[ -z "$next_iso_location" ]] || [[ ! -f "$next_iso_location" && ! -b "$next_iso_location" ]]; do
    # List removable devices if any are present
-    local removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
+    local removable_devices
+    removable_devices=$(lsblk -no PATH,SIZE,TYPE,MOUNTPOINTS,RM | awk '$NF==1')
    if [[ -n "$removable_devices" ]]; then
      echo "PATH SIZE TYPE MOUNTPOINTS RM"
      echo "$removable_devices"

@@ -894,21 +968,21 @@ run_airgap_intermediate_upgrade() {

  echo "Using $next_iso_location for required intermediary upgrade."
  exec bash <<EOF
-ISOLOC=$next_iso_location soup -y && \
-ISOLOC=$next_iso_location soup -y && \
+ISOLOC="$next_iso_location" soup -y && \
+ISOLOC="$next_iso_location" soup -y && \

echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \

-timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
+timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh "$required_es_upgrade_version" "$es_required_version_statefile" && \

echo -e "\n##############################################################################################################################\n" && \

# automatically start the next soup if the original ISO isn't using the same block device we just used
if [[ -n "$originally_requested_iso_location" ]] && [[ "$originally_requested_iso_location" != "$next_iso_location" ]]; then
  umount /tmp/soagupdate
-  ISOLOC=$originally_requested_iso_location soup -y && \
-  ISOLOC=$originally_requested_iso_location soup -y
+  ISOLOC="$originally_requested_iso_location" soup -y && \
+  ISOLOC="$originally_requested_iso_location" soup -y
else
  echo "Could not automatically start next soup to $originally_requested_so_version. Soup will now exit here at $(cat /etc/soversion)" && \

@@ -924,29 +998,29 @@ run_network_intermediate_upgrade() {
  if [[ -n "$BRANCH" ]]; then
    local originally_requested_so_branch="$BRANCH"
  else
-    local originally_requested_so_branch="2.4/main"
+    local originally_requested_so_branch="3/main"
  fi

  echo "Starting automated intermediate upgrade to $next_step_so_version."
  echo "After completion, the system will automatically attempt to upgrade to the latest version."
  echo -e "\n##############################################################################################################################\n"
  exec bash << EOF
-BRANCH=$next_step_so_version soup -y && \
-BRANCH=$next_step_so_version soup -y && \
+BRANCH="$next_step_so_version" soup -y && \
+BRANCH="$next_step_so_version" soup -y && \

echo -e "\n##############################################################################################################################\n" && \
echo -e "Verifying Elasticsearch was successfully upgraded to $required_es_upgrade_version across the grid. This part can take a while as Searchnodes/Heavynodes sync up with the Manager! \n\nOnce verification completes the next soup will begin automatically. If verification takes longer than 1 hour it will stop waiting and your grid will remain at $next_step_so_version. Allowing for all Searchnodes/Heavynodes to upgrade Elasticsearch to the required version on their own time.\n" && \

-timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh $required_es_upgrade_version $es_required_version_statefile && \
+timeout --foreground 4000 bash /tmp/so_intermediate_upgrade_verification.sh "$required_es_upgrade_version" "$es_required_version_statefile" && \

echo -e "\n##############################################################################################################################\n" && \
if [[ -n "$originally_requested_iso_location" ]]; then
  # nonairgap soup that used -f originally, runs intermediate upgrade using network + BRANCH, later coming back to the original ISO for the last soup
-  ISOLOC=$originally_requested_iso_location soup -y && \
-  ISOLOC=$originally_requested_iso_location soup -y
+  ISOLOC="$originally_requested_iso_location" soup -y && \
+  ISOLOC="$originally_requested_iso_location" soup -y
else
-  BRANCH=$originally_requested_so_branch soup -y && \
-  BRANCH=$originally_requested_so_branch soup -y
+  BRANCH="$originally_requested_so_branch" soup -y && \
+  BRANCH="$originally_requested_so_branch" soup -y
fi
echo -e "\n##############################################################################################################################\n"
EOF
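The quoting added inside these exec bash <<EOF blocks matters because an unquoted heredoc delimiter expands variables when the heredoc is read, so the generated script bakes in the values that were current at that moment; the new quotes protect the baked-in assignment against whitespace. A compact illustration of the expansion timing, using a made-up path:

next_iso_location="/mnt/usb/so iso/securityonion.iso"   # hypothetical path containing a space
bash <<EOF
# The parent shell already substituted the value below; the quotes it
# carries keep the embedded space from splitting the assignment here.
ISOLOC="$next_iso_location" env | grep ^ISOLOC
EOF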
@@ -387,7 +387,7 @@ http {
    error_page 429 = @error429;

    location @error401 {
-        if ($request_uri ~* (^/api/.*|^/connect/.*|^/oauth2/.*)) {
+        if ($request_uri ~* (^/api/.*|^/connect/.*|^/oauth2/.*|^/.*\.map$)) {
            return 401;
        }

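The effect of the added ^/.*\.map$ alternative is that unauthenticated requests for JavaScript source maps now get a bare 401 from the @error401 handler instead of whatever non-API fallback (presumably the login redirect) they received before. A quick way to check against a manager; the hostname is illustrative:

# Expect a plain 401 status for any .map path when not logged in.
curl -sk -o /dev/null -w '%{http_code}\n' https://manager.example.local/app.js.map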
@@ -25,8 +25,33 @@ manager_run_es_soc:
    - salt: {{NEWNODE}}_update_mine
{% endif %}

+# so-minion has already added the new minion's entry to telegraf/creds.sls
+# via so-telegraf-cred before this orch fires. Reconcile the Postgres role
+# on the manager so the new minion can authenticate on its first highstate,
+# then refresh the minion's pillar so its telegraf.conf renders with the
+# freshly-written cred.
+manager_create_postgres_telegraf_role:
+  salt.state:
+    - tgt: {{ MANAGER }}
+    - sls:
+      - postgres.telegraf_users
+    - queue: True
+    - require:
+      - salt: {{NEWNODE}}_update_mine
+
+{{NEWNODE}}_refresh_pillar:
+  salt.function:
+    - name: saltutil.refresh_pillar
+    - tgt: {{ NEWNODE }}
+    - kwarg:
+        wait: True
+    - require:
+      - salt: manager_create_postgres_telegraf_role
+
{{NEWNODE}}_run_highstate:
  salt.state:
    - tgt: {{ NEWNODE }}
    - highstate: True
    - queue: True
+    - require:
+      - salt: {{NEWNODE}}_refresh_pillar
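For reference, the three orchestration steps added above correspond roughly to this manual sequence run from the manager; a sketch only, since the orchestration handles ordering and failure for you, and MINION stands in for the new node's ID:

MINION=newnode_sensor        # hypothetical minion ID
# 1. Reconcile Postgres roles from the creds pillar on the manager.
salt-call state.apply postgres.telegraf_users queue=True
# 2. Push the refreshed pillar out to the new minion.
salt "$MINION" saltutil.refresh_pillar
# 3. Highstate the minion so telegraf.conf renders with the new cred.
salt "$MINION" state.highstate queue=True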
@@ -0,0 +1,37 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls in allowed_states %}

{% set DIGITS = "1234567890" %}
{% set LOWERCASE = "qwertyuiopasdfghjklzxcvbnm" %}
{% set UPPERCASE = "QWERTYUIOPASDFGHJKLZXCVBNM" %}
{% set SYMBOLS = "~!@#^&*()-_=+[]|;:,.<>?" %}
{% set CHARS = DIGITS~LOWERCASE~UPPERCASE~SYMBOLS %}
{% set so_postgres_user_pass = salt['pillar.get']('postgres:auth:users:so_postgres_user:pass', salt['random.get_str'](72, chars=CHARS)) %}

# Admin cred only. Per-minion Telegraf creds live in telegraf/creds.sls,
# managed by /usr/sbin/so-telegraf-cred (called from so-minion).
postgres_auth_pillar:
  file.managed:
    - name: /opt/so/saltstack/local/pillar/postgres/auth.sls
    - mode: 640
    - reload_pillar: True
    - contents: |
        postgres:
          auth:
            users:
              so_postgres_user:
                user: so_postgres
                pass: "{{ so_postgres_user_pass }}"
    - show_changes: False

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
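Once this state has run on the manager, the generated admin cred can be confirmed straight from the reconciled pillar; a usage sketch:

# Confirm the managed pillar landed with the expected shape (the user
# key is safe to print; avoid echoing the pass key into shared terminals).
sudo salt-call pillar.get postgres:auth:users:so_postgres_user:user
# -> local: so_postgres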
@@ -0,0 +1,111 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'postgres/map.jinja' import PGMERGED %}

# Postgres Setup
postgresconfdir:
  file.directory:
    - name: /opt/so/conf/postgres
    - user: 939
    - group: 939
    - makedirs: True

postgressecretsdir:
  file.directory:
    - name: /opt/so/conf/postgres/secrets
    - user: 939
    - group: 939
    - mode: 700
    - require:
      - file: postgresconfdir

postgresdatadir:
  file.directory:
    - name: /nsm/postgres
    - user: 939
    - group: 939
    - makedirs: True

postgreslogdir:
  file.directory:
    - name: /opt/so/log/postgres
    - user: 939
    - group: 939
    - makedirs: True

postgresinitdir:
  file.directory:
    - name: /opt/so/conf/postgres/init
    - user: 939
    - group: 939
    - require:
      - file: postgresconfdir

postgresinitusers:
  file.managed:
    - name: /opt/so/conf/postgres/init/init-users.sh
    - source: salt://postgres/files/init-users.sh
    - user: 939
    - group: 939
    - mode: 755

postgresconf:
  file.managed:
    - name: /opt/so/conf/postgres/postgresql.conf
    - source: salt://postgres/files/postgresql.conf.jinja
    - user: 939
    - group: 939
    - template: jinja
    - defaults:
        PGMERGED: {{ PGMERGED }}

postgreshba:
  file.managed:
    - name: /opt/so/conf/postgres/pg_hba.conf
    - source: salt://postgres/files/pg_hba.conf
    - user: 939
    - group: 939
    - mode: 640

postgres_super_secret:
  file.managed:
    - name: /opt/so/conf/postgres/secrets/postgres_password
    - user: 939
    - group: 939
    - mode: 600
    - contents_pillar: 'secrets:postgres_pass'
    - show_changes: False
    - require:
      - file: postgressecretsdir

postgres_app_secret:
  file.managed:
    - name: /opt/so/conf/postgres/secrets/so_postgres_pass
    - user: 939
    - group: 939
    - mode: 600
    - contents_pillar: 'postgres:auth:users:so_postgres_user:pass'
    - show_changes: False
    - require:
      - file: postgressecretsdir

postgres_sbin:
  file.recurse:
    - name: /usr/sbin
    - source: salt://postgres/tools/sbin
    - user: root
    - group: root
    - file_mode: 755

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -0,0 +1,19 @@
postgres:
  enabled: True
  telegraf:
    retention_days: 14
  config:
    listen_addresses: '*'
    port: 5432
    max_connections: 100
    shared_buffers: 256MB
    ssl: 'on'
    ssl_cert_file: '/conf/postgres.crt'
    ssl_key_file: '/conf/postgres.key'
    ssl_ca_file: '/conf/ca.crt'
    hba_file: '/conf/pg_hba.conf'
    log_destination: 'stderr'
    logging_collector: 'off'
    log_min_messages: 'warning'
    shared_preload_libraries: pg_cron
    cron.database_name: so_telegraf
@@ -0,0 +1,33 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

include:
  - postgres.sostatus

so-postgres:
  docker_container.absent:
    - force: True

so-postgres_so-status.disabled:
  file.comment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-postgres$

so_postgres_backup:
  cron.absent:
    - name: /usr/sbin/so-postgres-backup > /dev/null 2>&1
    - identifier: so_postgres_backup
    - user: root

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -0,0 +1,109 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
{% set SO_POSTGRES_USER = salt['pillar.get']('postgres:auth:users:so_postgres_user:user', 'so_postgres') %}

include:
  - postgres.auth
  - postgres.ssl
  - postgres.config
  - postgres.sostatus
  - postgres.telegraf_users

so-postgres:
  docker_container.running:
    - image: {{ GLOBALS.registry_host }}:5000/{{ GLOBALS.image_repo }}/so-postgres:{{ GLOBALS.so_version }}
    - hostname: so-postgres
    - networks:
      - sobridge:
        - ipv4_address: {{ DOCKERMERGED.containers['so-postgres'].ip }}
    - port_bindings:
      {% for BINDING in DOCKERMERGED.containers['so-postgres'].port_bindings %}
      - {{ BINDING }}
      {% endfor %}
    - environment:
      - POSTGRES_DB=securityonion
      # Passwords are delivered via mounted 0600 secret files, not plaintext env vars.
      # The upstream postgres image resolves POSTGRES_PASSWORD_FILE; entrypoint.sh and
      # init-users.sh resolve SO_POSTGRES_PASS_FILE the same way.
      - POSTGRES_PASSWORD_FILE=/run/secrets/postgres_password
      - SO_POSTGRES_USER={{ SO_POSTGRES_USER }}
      - SO_POSTGRES_PASS_FILE=/run/secrets/so_postgres_pass
      {% if DOCKERMERGED.containers['so-postgres'].extra_env %}
      {% for XTRAENV in DOCKERMERGED.containers['so-postgres'].extra_env %}
      - {{ XTRAENV }}
      {% endfor %}
      {% endif %}
    - binds:
      - /opt/so/log/postgres/:/log:rw
      - /nsm/postgres:/var/lib/postgresql/data:rw
      - /opt/so/conf/postgres/postgresql.conf:/conf/postgresql.conf:ro
      - /opt/so/conf/postgres/pg_hba.conf:/conf/pg_hba.conf:ro
      - /opt/so/conf/postgres/secrets:/run/secrets:ro
      - /opt/so/conf/postgres/init/init-users.sh:/docker-entrypoint-initdb.d/init-users.sh:ro
      - /etc/pki/postgres.crt:/conf/postgres.crt:ro
      - /etc/pki/postgres.key:/conf/postgres.key:ro
      - /etc/pki/tls/certs/intca.crt:/conf/ca.crt:ro
      {% if DOCKERMERGED.containers['so-postgres'].custom_bind_mounts %}
      {% for BIND in DOCKERMERGED.containers['so-postgres'].custom_bind_mounts %}
      - {{ BIND }}
      {% endfor %}
      {% endif %}
    {% if DOCKERMERGED.containers['so-postgres'].extra_hosts %}
    - extra_hosts:
      {% for XTRAHOST in DOCKERMERGED.containers['so-postgres'].extra_hosts %}
      - {{ XTRAHOST }}
      {% endfor %}
    {% endif %}
    {% if DOCKERMERGED.containers['so-postgres'].ulimits %}
    - ulimits:
      {% for ULIMIT in DOCKERMERGED.containers['so-postgres'].ulimits %}
      - {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
      {% endfor %}
    {% endif %}
    - watch:
      - file: postgresconf
      - file: postgreshba
      - file: postgresinitusers
      - file: postgres_super_secret
      - file: postgres_app_secret
      - x509: postgres_crt
      - x509: postgres_key
    - require:
      - file: postgresconf
      - file: postgreshba
      - file: postgresinitusers
      - file: postgres_super_secret
      - file: postgres_app_secret
      - x509: postgres_crt
      - x509: postgres_key

delete_so-postgres_so-status.disabled:
  file.uncomment:
    - name: /opt/so/conf/so-status/so-status.conf
    - regex: ^so-postgres$

so_postgres_backup:
  cron.present:
    - name: /usr/sbin/so-postgres-backup > /dev/null 2>&1
    - identifier: so_postgres_backup
    - user: root
    - minute: '5'
    - hour: '0'
    - daymonth: '*'
    - month: '*'
    - dayweek: '*'

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -0,0 +1,34 @@
#!/bin/bash
set -e

# Create or update application user for SOC platform access
# This script runs on first database initialization via docker-entrypoint-initdb.d
# The password is properly escaped to handle special characters
if [ -z "${SO_POSTGRES_PASS:-}" ] && [ -n "${SO_POSTGRES_PASS_FILE:-}" ] && [ -r "$SO_POSTGRES_PASS_FILE" ]; then
  SO_POSTGRES_PASS="$(< "$SO_POSTGRES_PASS_FILE")"
fi
psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --dbname "$POSTGRES_DB" <<-EOSQL
	DO \$\$
	BEGIN
	  IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '${SO_POSTGRES_USER}') THEN
	    EXECUTE format('CREATE ROLE %I WITH LOGIN PASSWORD %L', '${SO_POSTGRES_USER}', '${SO_POSTGRES_PASS}');
	  ELSE
	    EXECUTE format('ALTER ROLE %I WITH PASSWORD %L', '${SO_POSTGRES_USER}', '${SO_POSTGRES_PASS}');
	  END IF;
	END
	\$\$;
	GRANT ALL PRIVILEGES ON DATABASE "$POSTGRES_DB" TO "$SO_POSTGRES_USER";
	-- Lock the SOC database down at the connect layer; PUBLIC gets CONNECT
	-- by default, which would let per-minion telegraf roles open sessions
	-- here. They have no schema/table grants inside so reads fail, but
	-- revoking CONNECT closes the soft edge entirely.
	REVOKE CONNECT ON DATABASE "$POSTGRES_DB" FROM PUBLIC;
	GRANT CONNECT ON DATABASE "$POSTGRES_DB" TO "$SO_POSTGRES_USER";
EOSQL

# Bootstrap the Telegraf metrics database. Per-minion roles + schemas are
# reconciled on every state.apply by postgres/telegraf_users.sls; this block
# only ensures the shared database exists on first initialization.
if ! psql -U "$POSTGRES_USER" -tAc "SELECT 1 FROM pg_database WHERE datname='so_telegraf'" | grep -q 1; then
  psql -v ON_ERROR_STOP=1 -U "$POSTGRES_USER" -c "CREATE DATABASE so_telegraf"
fi
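The format('%I ... %L') construction above is what makes the role DDL safe for arbitrary generated passwords: %I quotes identifiers and %L quotes literals, doubling any embedded quote rather than letting it terminate the string. A standalone demonstration against the running container, with a throwaway sample value:

# %L turns a value containing a quote into a safe SQL literal.
docker exec so-postgres psql -U postgres -c \
  "SELECT format('CREATE ROLE %I WITH LOGIN PASSWORD %L', 'so_user', 'p''ass')"
# -> CREATE ROLE so_user WITH LOGIN PASSWORD 'p''ass'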
@@ -0,0 +1,16 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.
#
# Managed by Salt — do not edit by hand.
# Client authentication config: only local (Unix socket) connections and TLS-wrapped TCP
# connections are accepted. Plain-text `host ...` lines are intentionally omitted so a
# misconfigured client with sslmode=disable cannot negotiate a cleartext session.

# Local connections (Unix socket, container-internal) use peer/trust.
local   all   all                 trust

# TCP connections MUST use TLS (hostssl) and authenticate with SCRAM.
hostssl all   all   0.0.0.0/0     scram-sha-256
hostssl all   all   ::/0          scram-sha-256
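With only local and hostssl rules present, a cleartext TCP attempt is rejected at authentication time rather than silently accepted. A way to verify both directions from the manager; a sketch, assuming the container is up and the superuser secret is in place:

# Should fail: no plain `host` rule exists for TCP.
docker exec so-postgres psql "host=127.0.0.1 user=postgres sslmode=disable" -c 'SELECT 1'
# Should succeed over TLS, authenticating with the mounted secret.
docker exec so-postgres bash -c \
  'PGPASSWORD="$(cat /run/secrets/postgres_password)" psql "host=127.0.0.1 user=postgres sslmode=require" -c "SELECT 1"'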
@@ -0,0 +1,8 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0. #}

{% for key, value in PGMERGED.config.items() %}
{{ key }} = '{{ value | string | replace("'", "''") }}'
{% endfor %}
@@ -0,0 +1,13 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'postgres/map.jinja' import PGMERGED %}

include:
{% if PGMERGED.enabled %}
  - postgres.enabled
{% else %}
  - postgres.disabled
{% endif %}
@@ -0,0 +1,7 @@
{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
   or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
   https://securityonion.net/license; you may not use this file except in compliance with the
   Elastic License 2.0. #}

{% import_yaml 'postgres/defaults.yaml' as PGDEFAULTS %}
{% set PGMERGED = salt['pillar.get']('postgres', PGDEFAULTS.postgres, merge=True) %}
@@ -0,0 +1,89 @@
postgres:
  enabled:
    description: Whether the PostgreSQL database container is enabled on this grid. Backs the assistant store and the Telegraf metrics database.
    forcedType: bool
    readonly: True
    helpLink: influxdb
  telegraf:
    retention_days:
      description: Number of days of Telegraf metrics to keep in the so_telegraf database. Older partitions are dropped hourly by pg_partman.
      forcedType: int
      helpLink: postgres
  config:
    max_connections:
      description: Maximum number of concurrent PostgreSQL connections.
      forcedType: int
      global: True
      helpLink: postgres
    shared_buffers:
      description: Amount of memory PostgreSQL uses for shared buffers (e.g. 256MB, 1GB). Raising this improves read cache hit rate at the cost of system RAM.
      global: True
      helpLink: postgres
    log_min_messages:
      description: Minimum severity of server messages written to the PostgreSQL log.
      options:
        - debug1
        - info
        - notice
        - warning
        - error
        - log
        - fatal
      global: True
      helpLink: postgres
    listen_addresses:
      description: Interfaces PostgreSQL listens on. Must remain '*' so clients on the docker bridge network can connect.
      global: True
      advanced: True
      helpLink: postgres
    port:
      description: TCP port PostgreSQL listens on inside the container. Firewall rules and container port mapping assume 5432.
      forcedType: int
      global: True
      advanced: True
      helpLink: postgres
    ssl:
      description: Whether PostgreSQL accepts TLS connections. Must remain 'on' — pg_hba.conf requires hostssl for TCP.
      global: True
      advanced: True
      helpLink: postgres
    ssl_cert_file:
      description: Path (inside the container) to the TLS server certificate. Salt-managed.
      global: True
      advanced: True
      helpLink: postgres
    ssl_key_file:
      description: Path (inside the container) to the TLS server private key. Salt-managed.
      global: True
      advanced: True
      helpLink: postgres
    ssl_ca_file:
      description: Path (inside the container) to the CA bundle PostgreSQL uses to verify client certificates. Salt-managed.
      global: True
      advanced: True
      helpLink: postgres
    hba_file:
      description: Path (inside the container) to the pg_hba.conf authentication file. Salt-managed — edit salt/postgres/files/pg_hba.conf.
      global: True
      advanced: True
      helpLink: postgres
    log_destination:
      description: Where PostgreSQL writes its server log. 'stderr' routes to the container log stream.
      global: True
      advanced: True
      helpLink: postgres
    logging_collector:
      description: Whether to run a separate logging collector process. Disabled because the docker log stream already captures stderr.
      global: True
      advanced: True
      helpLink: postgres
    shared_preload_libraries:
      description: Comma-separated list of extensions loaded at server start. Required for pg_cron which drives pg_partman maintenance — do not remove.
      global: True
      advanced: True
      helpLink: postgres
    cron.database_name:
      description: Database pg_cron schedules jobs in. Must be so_telegraf so partman maintenance runs in the right database context.
      global: True
      advanced: True
      helpLink: postgres
@@ -0,0 +1,21 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}

append_so-postgres_so-status.conf:
  file.append:
    - name: /opt/so/conf/so-status/so-status.conf
    - text: so-postgres
    - unless: grep -q so-postgres /opt/so/conf/so-status/so-status.conf

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -0,0 +1,55 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'ca/map.jinja' import CA %}

postgres_key:
  x509.private_key_managed:
    - name: /etc/pki/postgres.key
    - keysize: 4096
    - backup: True
    - new: True
    {% if salt['file.file_exists']('/etc/pki/postgres.key') -%}
    - prereq:
      - x509: /etc/pki/postgres.crt
    {%- endif %}
    - retry:
        attempts: 5
        interval: 30

postgres_crt:
  x509.certificate_managed:
    - name: /etc/pki/postgres.crt
    - ca_server: {{ CA.server }}
    - subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
    - signing_policy: postgres
    - private_key: /etc/pki/postgres.key
    - CN: {{ GLOBALS.hostname }}
    - days_remaining: 7
    - days_valid: 820
    - backup: True
    - timeout: 30
    - retry:
        attempts: 5
        interval: 30

postgresKeyperms:
  file.managed:
    - replace: False
    - name: /etc/pki/postgres.key
    - mode: 400
    - user: 939
    - group: 939

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
@@ -0,0 +1,157 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

{% from 'allowed_states.map.jinja' import allowed_states %}
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'telegraf/map.jinja' import TELEGRAFMERGED %}

{# postgres_wait_ready below requires `docker_container: so-postgres`, which is
   declared in postgres.enabled. Include it here so state.apply postgres.telegraf_users
   on its own (e.g. from orch.deploy_newnode) still has that ID in scope. Salt
   de-duplicates the circular include. #}
include:
  - postgres.enabled

{% set TG_OUT = TELEGRAFMERGED.output | upper %}
{% if TG_OUT in ['POSTGRES', 'BOTH'] %}

# docker_container.running returns as soon as the container starts, but on
# first-init docker-entrypoint.sh starts a temporary postgres with
# `listen_addresses=''` to run /docker-entrypoint-initdb.d scripts, then
# shuts it down before exec'ing the real CMD. A default pg_isready check
# (Unix socket) passes during that ephemeral phase and races the shutdown
# with "the database system is shutting down". Checking TCP readiness on
# 127.0.0.1 only succeeds after the final postgres binds the port.
postgres_wait_ready:
  cmd.run:
    - name: |
        for i in $(seq 1 60); do
          if docker exec so-postgres pg_isready -h 127.0.0.1 -U postgres -q 2>/dev/null; then
            exit 0
          fi
          sleep 2
        done
        echo "so-postgres did not accept TCP connections within 120s" >&2
        exit 1
    - require:
      - docker_container: so-postgres

# Ensure the shared Telegraf database exists. init-users.sh only runs on a
# fresh data dir, so hosts upgraded onto an existing /nsm/postgres volume
# would otherwise never get so_telegraf.
postgres_create_telegraf_db:
  cmd.run:
    - name: |
        if ! docker exec so-postgres psql -U postgres -tAc "SELECT 1 FROM pg_database WHERE datname='so_telegraf'" | grep -q 1; then
          docker exec so-postgres psql -v ON_ERROR_STOP=1 -U postgres -c "CREATE DATABASE so_telegraf"
        fi
    - require:
      - cmd: postgres_wait_ready

# Provision the shared group role and schema once. Every per-minion role is a
# member of so_telegraf, and each Telegraf connection does SET ROLE so_telegraf
# (via options='-c role=so_telegraf' in the connection string) so tables created
# on first write are owned by the group role and every member can INSERT/SELECT.
postgres_telegraf_group_role:
  cmd.run:
    - name: |
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d so_telegraf <<'EOSQL'
        DO $$
        BEGIN
          IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = 'so_telegraf') THEN
            CREATE ROLE so_telegraf NOLOGIN;
          END IF;
        END
        $$;
        GRANT CONNECT ON DATABASE so_telegraf TO so_telegraf;
        CREATE SCHEMA IF NOT EXISTS telegraf AUTHORIZATION so_telegraf;
        GRANT USAGE, CREATE ON SCHEMA telegraf TO so_telegraf;
        CREATE SCHEMA IF NOT EXISTS partman;
        CREATE EXTENSION IF NOT EXISTS pg_partman SCHEMA partman;
        CREATE EXTENSION IF NOT EXISTS pg_cron;
        -- Telegraf (running as so_telegraf) calls partman.create_parent()
        -- on first write of each metric, which needs USAGE on the partman
        -- schema, EXECUTE on its functions/procedures, and write access to
        -- partman.part_config so it can register new partitioned parents.
        GRANT USAGE, CREATE ON SCHEMA partman TO so_telegraf;
        GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA partman TO so_telegraf;
        GRANT EXECUTE ON ALL FUNCTIONS IN SCHEMA partman TO so_telegraf;
        GRANT EXECUTE ON ALL PROCEDURES IN SCHEMA partman TO so_telegraf;
        -- partman creates per-parent template tables (partman.template_*) at
        -- runtime; default privileges extend DML/sequence access to them.
        ALTER DEFAULT PRIVILEGES IN SCHEMA partman
          GRANT SELECT, INSERT, UPDATE, DELETE ON TABLES TO so_telegraf;
        ALTER DEFAULT PRIVILEGES IN SCHEMA partman
          GRANT USAGE, SELECT, UPDATE ON SEQUENCES TO so_telegraf;
        -- Hourly partman maintenance. cron.schedule is idempotent by jobname.
        SELECT cron.schedule(
          'telegraf-partman-maintenance',
          '17 * * * *',
          'CALL partman.run_maintenance_proc()'
        );
        EOSQL
    - require:
      - cmd: postgres_create_telegraf_db

{% set creds = salt['pillar.get']('telegraf:postgres_creds', {}) %}
{% for mid, entry in creds.items() %}
{% if entry.get('user') and entry.get('pass') %}
{% set u = entry.user %}
{% set p = entry.pass | replace("'", "''") %}

postgres_telegraf_role_{{ u }}:
  cmd.run:
    - name: |
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d so_telegraf <<'EOSQL'
        DO $$
        BEGIN
          IF NOT EXISTS (SELECT FROM pg_catalog.pg_roles WHERE rolname = '{{ u }}') THEN
            EXECUTE format('CREATE ROLE %I WITH LOGIN PASSWORD %L', '{{ u }}', '{{ p }}');
          ELSE
            EXECUTE format('ALTER ROLE %I WITH PASSWORD %L', '{{ u }}', '{{ p }}');
          END IF;
        END
        $$;
        GRANT CONNECT ON DATABASE so_telegraf TO "{{ u }}";
        GRANT so_telegraf TO "{{ u }}";
        EOSQL
    - require:
      - cmd: postgres_telegraf_group_role

{% endif %}
{% endfor %}

# Reconcile partman retention from pillar. Runs after role/schema setup so
# any partitioned parents Telegraf has already created get their retention
# refreshed whenever postgres.telegraf.retention_days changes.
{% set retention = salt['pillar.get']('postgres:telegraf:retention_days', 14) | int %}
postgres_telegraf_retention_reconcile:
  cmd.run:
    - name: |
        docker exec -i so-postgres psql -v ON_ERROR_STOP=1 -U postgres -d so_telegraf <<'EOSQL'
        DO $$
        BEGIN
          IF EXISTS (SELECT 1 FROM pg_catalog.pg_extension WHERE extname = 'pg_partman') THEN
            UPDATE partman.part_config
            SET retention = '{{ retention }} days',
                retention_keep_table = false
            WHERE parent_table LIKE 'telegraf.%';
          END IF;
        END
        $$;
        EOSQL
    - require:
      - cmd: postgres_telegraf_group_role

{% endif %}

{% else %}

{{sls}}_state_not_allowed:
  test.fail_without_changes:
    - name: {{sls}}_state_not_allowed

{% endif %}
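After this state has applied, the role, schema, and retention reconciliation can be spot-checked directly; a sketch, keeping in mind that part_config only has rows once Telegraf has written at least one metric:

# Each partitioned parent should show the pillar-driven retention window.
docker exec so-postgres psql -U postgres -d so_telegraf \
  -c "SELECT parent_table, retention, retention_keep_table FROM partman.part_config;"
# And the hourly maintenance job should be registered with pg_cron.
docker exec so-postgres psql -U postgres -d so_telegraf \
  -c "SELECT jobname, schedule FROM cron.job;"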
@@ -0,0 +1,39 @@
#!/bin/bash
#
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

# Backups contain role password hashes and full chat data; keep them 0600.
umask 0077

TODAY=$(date '+%Y_%m_%d')
BACKUPDIR=/nsm/backup
BACKUPFILE="$BACKUPDIR/so-postgres-backup-$TODAY.sql.gz"
MAXBACKUPS=7

mkdir -p $BACKUPDIR

# Skip if already backed up today
if [ -f "$BACKUPFILE" ]; then
  exit 0
fi

# Skip if container isn't running
if ! docker ps --format '{{.Names}}' | grep -q '^so-postgres$'; then
  exit 0
fi

# Dump all databases and roles, compress
docker exec so-postgres pg_dumpall -U postgres | gzip > "$BACKUPFILE"

# Retention cleanup
NUMBACKUPS=$(find $BACKUPDIR -type f -name "so-postgres-backup*" | wc -l)
while [ "$NUMBACKUPS" -gt "$MAXBACKUPS" ]; do
  OLDEST=$(find $BACKUPDIR -type f -name "so-postgres-backup*" -printf '%T+ %p\n' | sort | head -n 1 | awk -F" " '{print $2}')
  rm -f "$OLDEST"
  NUMBACKUPS=$(find $BACKUPDIR -type f -name "so-postgres-backup*" | wc -l)
done
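The dump is a plain pg_dumpall stream, so restoring is the symmetric pipe back in; a sketch, assuming so-postgres is already running on the target and with the date portion of the filename filled in for illustration:

# Restore roles and databases from a chosen backup.
gunzip -c /nsm/backup/so-postgres-backup-2026_01_01.sql.gz | \
  docker exec -i so-postgres psql -U postgres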
@@ -0,0 +1,80 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

usage() {
  echo "Usage: $0 <operation> [args]"
  echo ""
  echo "Supported Operations:"
  echo "  sql       Execute a SQL command, requires: <sql>"
  echo "  sqlfile   Execute a SQL file, requires: <path>"
  echo "  shell     Open an interactive psql shell"
  echo "  dblist    List databases"
  echo "  userlist  List database roles"
  echo ""
  exit 1
}

if [ $# -lt 1 ]; then
  usage
fi

# Check for prerequisites
if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run using sudo!"
  exit 1
fi

COMMAND=$(basename $0)
OP=$1
shift

set -eo pipefail

log() {
  echo -e "$(date) | $COMMAND | $@" >&2
}

so_psql() {
  docker exec so-postgres psql -U postgres -d securityonion "$@"
}

case "$OP" in

  sql)
    [ $# -lt 1 ] && usage
    so_psql -c "$1"
    ;;

  sqlfile)
    [ $# -ne 1 ] && usage
    if [ ! -f "$1" ]; then
      log "File not found: $1"
      exit 1
    fi
    docker cp "$1" so-postgres:/tmp/sqlfile.sql
    docker exec so-postgres psql -U postgres -d securityonion -f /tmp/sqlfile.sql
    docker exec so-postgres rm -f /tmp/sqlfile.sql
    ;;

  shell)
    docker exec -it so-postgres psql -U postgres -d securityonion
    ;;

  dblist)
    so_psql -c "\l"
    ;;

  userlist)
    so_psql -c "\du"
    ;;

  *)
    usage
    ;;
esac
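Typical invocations of the wrapper above, for reference; the SQL file path is illustrative:

sudo so-postgres-query dblist                      # list databases
sudo so-postgres-query sql "SELECT version();"     # one-off statement
sudo so-postgres-query sqlfile /tmp/migrate.sql    # run a local SQL file
sudo so-postgres-query shell                       # interactive psql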
@@ -0,0 +1,10 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

/usr/sbin/so-restart postgres $1

@@ -0,0 +1,10 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

/usr/sbin/so-start postgres $1

@@ -0,0 +1,10 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

. /usr/sbin/so-common

/usr/sbin/so-stop postgres $1
@@ -0,0 +1,157 @@
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
# https://securityonion.net/license; you may not use this file except in compliance with the
# Elastic License 2.0.

# Point-in-time host metrics from the Telegraf Postgres backend.
# Sanity-check tool for verifying metrics are landing before the grid
# dashboards consume them.
#
# Assumes Telegraf's postgresql output is configured with
# tags_as_foreign_keys = true, tags_as_jsonb = true, fields_as_jsonb = true,
# so metric tables are (time, tag_id, fields jsonb) and tag tables are
# (tag_id, tags jsonb).
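To make that layout concrete, here is an illustrative query against the schema those comments describe (a sketch, not part of this diff; it mirrors the joins the script performs below):

    # Latest raw CPU sample for any host: the metric table holds
    # (time, tag_id, fields jsonb); cpu_tag holds (tag_id, tags jsonb).
    docker exec so-postgres psql -U postgres -d so_telegraf -c "
      SELECT c.time, t.tags->>'host' AS host, c.fields->>'usage_idle' AS idle
      FROM telegraf.cpu c
      JOIN telegraf.cpu_tag t USING (tag_id)
      ORDER BY c.time DESC LIMIT 1;"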
. /usr/sbin/so-common

usage() {
  cat <<EOF
Usage: $0 [host]

Shows the most recent CPU, memory, disk, and load metrics for each host
from the so_telegraf Postgres database. Without an argument, reports on
every host that has data. With a host, limits output to that one.

Requires: sudo, so-postgres running, telegraf.output set to
POSTGRES or BOTH.
EOF
  exit 1
}

if [ "$(id -u)" -ne 0 ]; then
  echo "This script must be run using sudo!"
  exit 1
fi

case "${1:-}" in
  -h|--help) usage ;;
esac

FILTER_HOST="${1:-}"
SCHEMA="telegraf"

# Host values are interpolated into SQL below. Hostnames are [A-Za-z0-9._-];
# any other character in a tag value or CLI arg is rejected to prevent a
# stored-tag (or CLI) → SQL injection via a compromised Telegraf writer.
HOST_RE='^[A-Za-z0-9._-]+$'
if [ -n "$FILTER_HOST" ] && ! [[ "$FILTER_HOST" =~ $HOST_RE ]]; then
  echo "Invalid host filter: $FILTER_HOST" >&2
  exit 1
fi

so_psql() {
  docker exec so-postgres psql -U postgres -d so_telegraf -At -F $'\t' "$@"
}

if ! docker exec so-postgres psql -U postgres -lqt 2>/dev/null | cut -d\| -f1 | grep -qw so_telegraf; then
  echo "Database so_telegraf not found. Is telegraf.output set to POSTGRES or BOTH?"
  exit 2
fi

table_exists() {
  local table="$1"
  [ -n "$(so_psql -c "SELECT 1 FROM information_schema.tables WHERE table_schema='${SCHEMA}' AND table_name='${table}' LIMIT 1;")" ]
}

# Discover hosts from cpu_tag (every minion reports cpu).
if ! table_exists "cpu_tag"; then
  echo "${SCHEMA}.cpu_tag not found. Has Telegraf written any rows yet?"
  exit 0
fi

HOSTS=$(so_psql -c "
  SELECT DISTINCT tags->>'host'
  FROM \"${SCHEMA}\".cpu_tag
  WHERE tags ? 'host'
  ORDER BY 1;")

if [ -z "$HOSTS" ]; then
  echo "No hosts found in ${SCHEMA}. Is Telegraf configured to write to Postgres?"
  exit 0
fi

print_metric() {
  so_psql -c "$1"
}

for host in $HOSTS; do
  if ! [[ "$host" =~ $HOST_RE ]]; then
    echo "Skipping host with invalid characters in tag value: $host" >&2
    continue
  fi
  if [ -n "$FILTER_HOST" ] && [ "$host" != "$FILTER_HOST" ]; then
    continue
  fi

  echo "===================================================================="
  echo " Host: $host"
  echo "===================================================================="

  if table_exists "cpu"; then
    print_metric "
      SELECT 'cpu ' AS metric,
             to_char(c.time, 'YYYY-MM-DD HH24:MI:SS') AS ts,
             round((100 - (c.fields->>'usage_idle')::numeric), 1) || '% used'
      FROM \"${SCHEMA}\".cpu c
      JOIN \"${SCHEMA}\".cpu_tag t USING (tag_id)
      WHERE t.tags->>'host' = '${host}' AND t.tags->>'cpu' = 'cpu-total'
      ORDER BY c.time DESC LIMIT 1;"
  fi

  if table_exists "mem"; then
    print_metric "
      SELECT 'memory ' AS metric,
             to_char(m.time, 'YYYY-MM-DD HH24:MI:SS') AS ts,
             round((m.fields->>'used_percent')::numeric, 1) || '% used (' ||
             pg_size_pretty((m.fields->>'used')::bigint) || ' of ' ||
             pg_size_pretty((m.fields->>'total')::bigint) || ')'
      FROM \"${SCHEMA}\".mem m
      JOIN \"${SCHEMA}\".mem_tag t USING (tag_id)
      WHERE t.tags->>'host' = '${host}'
      ORDER BY m.time DESC LIMIT 1;"
  fi

  if table_exists "disk"; then
    print_metric "
      SELECT 'disk ' || rpad(t.tags->>'path', 12) AS metric,
             to_char(d.time, 'YYYY-MM-DD HH24:MI:SS') AS ts,
             round((d.fields->>'used_percent')::numeric, 1) || '% used (' ||
             pg_size_pretty((d.fields->>'used')::bigint) || ' of ' ||
             pg_size_pretty((d.fields->>'total')::bigint) || ')'
      FROM \"${SCHEMA}\".disk d
      JOIN \"${SCHEMA}\".disk_tag t USING (tag_id)
      WHERE t.tags->>'host' = '${host}'
        AND d.time = (SELECT max(d2.time)
                      FROM \"${SCHEMA}\".disk d2
                      JOIN \"${SCHEMA}\".disk_tag t2 USING (tag_id)
                      WHERE t2.tags->>'host' = '${host}')
      ORDER BY t.tags->>'path';"
  fi

  if table_exists "system"; then
    print_metric "
      SELECT 'load ' AS metric,
             to_char(s.time, 'YYYY-MM-DD HH24:MI:SS') AS ts,
             (s.fields->>'load1') || ' / ' ||
             (s.fields->>'load5') || ' / ' ||
             (s.fields->>'load15') || ' (1/5/15m)'
      FROM \"${SCHEMA}\".system s
      JOIN \"${SCHEMA}\".system_tag t USING (tag_id)
      WHERE t.tags->>'host' = '${host}'
      ORDER BY s.time DESC LIMIT 1;"
  fi

  echo ""
done
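Hypothetical runs of the tool above (the installed script name is not shown in this hunk, so "so-telegraf-metrics" is assumed):

    sudo so-telegraf-metrics             # latest metrics for every host with data
    sudo so-telegraf-metrics sensor01    # limit output to one host
    sudo so-telegraf-metrics --help      # print the usage text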
@@ -6,39 +6,74 @@
 # Elastic License 2.0.
 
 import logging
-from subprocess import call
-import yaml
+import os
+import re
+import shlex
+import subprocess
 
 log = logging.getLogger(__name__)
 
+SO_MINION = '/usr/sbin/so-minion'
+
+_NODETYPE_RE = re.compile(r'^[A-Z][A-Z0-9_]{0,31}$')
+_MINIONID_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
+_HOSTPART_RE = re.compile(r'^[A-Za-z0-9._-]{1,253}$')
+_IPV4_RE = re.compile(
+    r'^(?:(?:25[0-5]|2[0-4]\d|[01]?\d?\d)\.){3}'
+    r'(?:25[0-5]|2[0-4]\d|[01]?\d?\d)$'
+)
+_HEAP_RE = re.compile(r'^\d{1,6}[kKmMgG]?$')
+
+
+def _check(name, value, pattern):
+    s = str(value)
+    if not pattern.match(s):
+        raise ValueError("sominion_setup_reactor: refusing unsafe %s=%r" % (name, value))
+    return s
+
+
 def run():
     log.info('sominion_setup_reactor: Running')
     minionid = data['id']
     DATA = data['data']
-    hv_name = DATA['HYPERVISOR_HOST']
     log.info('sominion_setup_reactor: DATA: %s' % DATA)
 
-    # Build the base command
-    cmd = "NODETYPE=" + DATA['NODETYPE'] + " /usr/sbin/so-minion -o=addVM -m=" + minionid + " -n=" + DATA['MNIC'] + " -i=" + DATA['MAINIP'] + " -c=" + str(DATA['CPUCORES']) + " -d='" + DATA['NODE_DESCRIPTION'] + "'"
+    nodetype = _check('NODETYPE', DATA['NODETYPE'], _NODETYPE_RE)
+    argv = [
+        SO_MINION,
+        '-o=addVM',
+        '-m=' + _check('minionid', minionid, _MINIONID_RE),
+        '-n=' + _check('MNIC', DATA['MNIC'], _HOSTPART_RE),
+        '-i=' + _check('MAINIP', DATA['MAINIP'], _IPV4_RE),
+        '-c=' + str(int(DATA['CPUCORES'])),
+        '-d=' + str(DATA['NODE_DESCRIPTION']),
+    ]
 
-    # Add optional arguments only if they exist in DATA
     if 'CORECOUNT' in DATA:
-        cmd += " -C=" + str(DATA['CORECOUNT'])
+        argv.append('-C=' + str(int(DATA['CORECOUNT'])))
 
     if 'INTERFACE' in DATA:
-        cmd += " -a=" + DATA['INTERFACE']
+        argv.append('-a=' + _check('INTERFACE', DATA['INTERFACE'], _HOSTPART_RE))
 
     if 'ES_HEAP_SIZE' in DATA:
-        cmd += " -e=" + DATA['ES_HEAP_SIZE']
+        argv.append('-e=' + _check('ES_HEAP_SIZE', DATA['ES_HEAP_SIZE'], _HEAP_RE))
 
     if 'LS_HEAP_SIZE' in DATA:
-        cmd += " -l=" + DATA['LS_HEAP_SIZE']
+        argv.append('-l=' + _check('LS_HEAP_SIZE', DATA['LS_HEAP_SIZE'], _HEAP_RE))
 
     if 'LSHOSTNAME' in DATA:
-        cmd += " -L=" + DATA['LSHOSTNAME']
+        argv.append('-L=' + _check('LSHOSTNAME', DATA['LSHOSTNAME'], _HOSTPART_RE))
 
-    log.info('sominion_setup_reactor: Command: %s' % cmd)
-    rc = call(cmd, shell=True)
+    env = os.environ.copy()
+    env['NODETYPE'] = nodetype
+
+    log.info(
+        'sominion_setup_reactor: argv: %s (NODETYPE=%s)',
+        ' '.join(shlex.quote(a) for a in argv),
+        shlex.quote(nodetype),
+    )
+    rc = subprocess.call(argv, shell=False, env=env)
 
     log.info('sominion_setup_reactor: rc: %s' % rc)
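The net effect of the rewrite above: request data is no longer interpolated into a shell string; each field must match a whitelist regex, NODETYPE travels via the environment rather than the command line, and so-minion is exec'd with shell=False. The equivalent shell command for one run would look roughly like this (values illustrative, not from this diff):

    NODETYPE=SEARCHNODE /usr/sbin/so-minion -o=addVM -m=vmnode01 \
        -n=bond0 -i=192.168.10.21 -c=8 '-d=search node guest VM'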
@@ -27,6 +27,7 @@ sool9_{{host}}:
     log_file: /opt/so/log/salt/minion
     grains:
       hypervisor_host: {{host ~ "_" ~ role}}
+      sosmodel: HVGUEST
     preflight_cmds:
       - |
         {%- set hostnames = [MANAGERHOSTNAME] %}
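Once a guest minion picks up this template, the new grain can be checked from the manager with stock Salt tooling (a sketch; the minion ID glob is assumed from the template name):

    sudo salt 'sool9_*' grains.get sosmodel
    # each matching minion should report: HVGUEST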
Some files were not shown because too many files have changed in this diff.