mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2026-04-08 05:42:10 +02:00
Compare commits
215 Commits
2.4/main
...
reyesj2-es
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
dd56e7f1ac | ||
|
|
51a3c04c3d | ||
|
|
8101bc4941 | ||
|
|
51e0ca2602 | ||
|
|
2a37ad82b2 | ||
|
|
80540da52f | ||
|
|
e4ba3d6a2a | ||
|
|
bbfb58ea4e | ||
|
|
c91deb97b1 | ||
|
|
dc2598d5cf | ||
|
|
ff45e5ebc6 | ||
|
|
1e2b51eae6 | ||
|
|
58d332ea94 | ||
|
|
dcc67b9b8f | ||
|
|
cd886dd0f9 | ||
|
|
37a6e28a6c | ||
|
|
434a2e7866 | ||
|
|
79707db6ee | ||
|
|
0707507412 | ||
|
|
c7e865aa1c | ||
|
|
a89db79854 | ||
|
|
812f65eee8 | ||
|
|
cfa530ba9c | ||
|
|
922c008b11 | ||
|
|
ea30749512 | ||
|
|
0a55592d7e | ||
|
|
115ca2c41d | ||
|
|
9e53bd3f2d | ||
|
|
d4f1078f84 | ||
|
|
1f9bf45b66 | ||
|
|
271de757e7 | ||
|
|
d4ac352b5a | ||
|
|
afcef1d0e7 | ||
|
|
91b164b728 | ||
|
|
6a4501241d | ||
|
|
c6978f9037 | ||
|
|
7300513636 | ||
|
|
fb7b73c601 | ||
|
|
f2b6d59c65 | ||
|
|
67162357a3 | ||
|
|
8ea97e4af3 | ||
|
|
2f9a2e15b3 | ||
|
|
a4fcf4ddf2 | ||
|
|
165e69cd11 | ||
|
|
07580c3afd | ||
|
|
f0f9de4b44 | ||
|
|
e857a8487a | ||
|
|
fa4bf218d5 | ||
|
|
2186872317 | ||
|
|
6e3986b0b0 | ||
|
|
2585bdd23f | ||
|
|
ca588d2e78 | ||
|
|
f756ecb396 | ||
|
|
82107f00a1 | ||
|
|
5c53244b54 | ||
|
|
3b269e8b82 | ||
|
|
7ece93d7e0 | ||
|
|
14d254e81b | ||
|
|
7af6efda1e | ||
|
|
ce972238fe | ||
|
|
442bd1499d | ||
|
|
30ea309dff | ||
|
|
bfeefeea2f | ||
|
|
8251d56a96 | ||
|
|
1b1e602716 | ||
|
|
034b1d045b | ||
|
|
20bf88b338 | ||
|
|
d3f819017b | ||
|
|
c92aedfff3 | ||
|
|
7aded184b3 | ||
|
|
d3938b61d2 | ||
|
|
c2c5aea244 | ||
|
|
83b7fecbbc | ||
|
|
d227cf71c8 | ||
|
|
020b9db610 | ||
|
|
cceaebe350 | ||
|
|
a982056363 | ||
|
|
db81834e06 | ||
|
|
318e4ec54b | ||
|
|
20bf05e9f3 | ||
|
|
4254769e68 | ||
|
|
c16ff2bd99 | ||
|
|
0c88b32fc2 | ||
|
|
0814f34f0e | ||
|
|
b6366e52ba | ||
|
|
825f377d2d | ||
|
|
74ad2990a7 | ||
|
|
738ce62d35 | ||
|
|
057ec6f0f1 | ||
|
|
20c4da50b1 | ||
|
|
5fb396fc09 | ||
|
|
a0b1e31717 | ||
|
|
cacae12ba3 | ||
|
|
83bd8a025c | ||
|
|
2a271b950b | ||
|
|
e19e83bebb | ||
|
|
066918e27d | ||
|
|
930985b770 | ||
|
|
346dc446de | ||
|
|
7e7b8dc8a8 | ||
|
|
341471d38e | ||
|
|
2349750e13 | ||
|
|
2c6c502067 | ||
|
|
00986dc2fd | ||
|
|
d60bef1371 | ||
|
|
5806a85214 | ||
|
|
2d97dfc8a1 | ||
|
|
d6263812a6 | ||
|
|
ef7d1771ab | ||
|
|
4dc377c99f | ||
|
|
a52e5d0474 | ||
|
|
1a943aefc5 | ||
|
|
4bb61d999d | ||
|
|
e0e0e3e97b | ||
|
|
6b039b3f94 | ||
|
|
d2d2f0cb5f | ||
|
|
e6ee7dac7c | ||
|
|
7bf63b822d | ||
|
|
1a7d72c630 | ||
|
|
4224713cc6 | ||
|
|
b452e70419 | ||
|
|
6809497730 | ||
|
|
70597a77ab | ||
|
|
f5faf86cb3 | ||
|
|
be4e253620 | ||
|
|
ebc1152376 | ||
|
|
625bfb3ba7 | ||
|
|
c11b83c712 | ||
|
|
a3b471c1d1 | ||
|
|
eaf3f10adc | ||
|
|
84f4e460f6 | ||
|
|
88841c9814 | ||
|
|
64bb0dfb5b | ||
|
|
ddb26a9f42 | ||
|
|
744d8fdd5e | ||
|
|
6feb06e623 | ||
|
|
afc14ec29d | ||
|
|
59134c65d0 | ||
|
|
614537998a | ||
|
|
d2cee468a0 | ||
|
|
94f454c311 | ||
|
|
17881c9a36 | ||
|
|
5b2def6fdd | ||
|
|
9b6d29212d | ||
|
|
c1bff03b1c | ||
|
|
b00f113658 | ||
|
|
7dcd923ebf | ||
|
|
1fcd8a7c1a | ||
|
|
4a89f7f26b | ||
|
|
a9196348ab | ||
|
|
12dec366e0 | ||
|
|
1713f6af76 | ||
|
|
7f4adb70bd | ||
|
|
e2483e4be0 | ||
|
|
322c0b8d56 | ||
|
|
81c1d8362d | ||
|
|
d1156ee3fd | ||
|
|
18f971954b | ||
|
|
e55ac7062c | ||
|
|
c178eada22 | ||
|
|
92213e302f | ||
|
|
72193b0249 | ||
|
|
066d7106b0 | ||
|
|
589de8e361 | ||
|
|
914cd8b611 | ||
|
|
845290595e | ||
|
|
544b60d111 | ||
|
|
aa0787b0ff | ||
|
|
89f144df75 | ||
|
|
cfccbe2bed | ||
|
|
3dd9a06d67 | ||
|
|
4bfe9039ed | ||
|
|
75cddbf444 | ||
|
|
89b18341c5 | ||
|
|
90137f7093 | ||
|
|
480187b1f5 | ||
|
|
b3ed54633f | ||
|
|
0360d4145c | ||
|
|
2bec5afcdd | ||
|
|
4539024280 | ||
|
|
398bd0c1da | ||
|
|
91759587f5 | ||
|
|
bc9841ea8c | ||
|
|
32241faf55 | ||
|
|
685e22bd68 | ||
|
|
88de779ff7 | ||
|
|
d452694c55 | ||
|
|
7fba8ac2b4 | ||
|
|
0738208627 | ||
|
|
a3720219d8 | ||
|
|
385726b87c | ||
|
|
d78a5867b8 | ||
|
|
ad960c2101 | ||
|
|
7f07c96a2f | ||
|
|
90bea975d0 | ||
|
|
e8adea3022 | ||
|
|
71839bc87f | ||
|
|
6809a40257 | ||
|
|
cea55a72c3 | ||
|
|
e38a4a21ee | ||
|
|
7ac1e767ab | ||
|
|
2c4d833a5b | ||
|
|
41d3dd0aa5 | ||
|
|
6050ab6b21 | ||
|
|
ae05251359 | ||
|
|
f23158aed5 | ||
|
|
b03b75315d | ||
|
|
cbd98efaf4 | ||
|
|
1f7bf1fd88 | ||
|
|
179019b136 | ||
|
|
ac022acbbe | ||
|
|
6bfe020c3b | ||
|
|
55a960bbc5 | ||
|
|
cd0d88e2c0 | ||
|
|
80f8fdc8d3 |
546
.github/.gitleaks.toml
vendored
546
.github/.gitleaks.toml
vendored
@@ -1,546 +0,0 @@
|
||||
title = "gitleaks config"
|
||||
|
||||
# Gitleaks rules are defined by regular expressions and entropy ranges.
|
||||
# Some secrets have unique signatures which make detecting those secrets easy.
|
||||
# Examples of those secrets would be GitLab Personal Access Tokens, AWS keys, and GitHub Access Tokens.
|
||||
# All these examples have defined prefixes like `glpat`, `AKIA`, `ghp_`, etc.
|
||||
#
|
||||
# Other secrets might just be a hash which means we need to write more complex rules to verify
|
||||
# that what we are matching is a secret.
|
||||
#
|
||||
# Here is an example of a semi-generic secret
|
||||
#
|
||||
# discord_client_secret = "8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ"
|
||||
#
|
||||
# We can write a regular expression to capture the variable name (identifier),
|
||||
# the assignment symbol (like '=' or ':='), and finally the actual secret.
|
||||
# The structure of a rule to match this example secret is below:
|
||||
#
|
||||
# Beginning string
|
||||
# quotation
|
||||
# │ End string quotation
|
||||
# │ │
|
||||
# ▼ ▼
|
||||
# (?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_\-]{32})['\"]
|
||||
#
|
||||
# ▲ ▲ ▲
|
||||
# │ │ │
|
||||
# │ │ │
|
||||
# identifier assignment symbol
|
||||
# Secret
|
||||
#
|
||||
[[rules]]
|
||||
id = "gitlab-pat"
|
||||
description = "GitLab Personal Access Token"
|
||||
regex = '''glpat-[0-9a-zA-Z\-\_]{20}'''
|
||||
|
||||
[[rules]]
|
||||
id = "aws-access-token"
|
||||
description = "AWS"
|
||||
regex = '''(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}'''
|
||||
|
||||
# Cryptographic keys
|
||||
[[rules]]
|
||||
id = "PKCS8-PK"
|
||||
description = "PKCS8 private key"
|
||||
regex = '''-----BEGIN PRIVATE KEY-----'''
|
||||
|
||||
[[rules]]
|
||||
id = "RSA-PK"
|
||||
description = "RSA private key"
|
||||
regex = '''-----BEGIN RSA PRIVATE KEY-----'''
|
||||
|
||||
[[rules]]
|
||||
id = "OPENSSH-PK"
|
||||
description = "SSH private key"
|
||||
regex = '''-----BEGIN OPENSSH PRIVATE KEY-----'''
|
||||
|
||||
[[rules]]
|
||||
id = "PGP-PK"
|
||||
description = "PGP private key"
|
||||
regex = '''-----BEGIN PGP PRIVATE KEY BLOCK-----'''
|
||||
|
||||
[[rules]]
|
||||
id = "github-pat"
|
||||
description = "GitHub Personal Access Token"
|
||||
regex = '''ghp_[0-9a-zA-Z]{36}'''
|
||||
|
||||
[[rules]]
|
||||
id = "github-oauth"
|
||||
description = "GitHub OAuth Access Token"
|
||||
regex = '''gho_[0-9a-zA-Z]{36}'''
|
||||
|
||||
[[rules]]
|
||||
id = "SSH-DSA-PK"
|
||||
description = "SSH (DSA) private key"
|
||||
regex = '''-----BEGIN DSA PRIVATE KEY-----'''
|
||||
|
||||
[[rules]]
|
||||
id = "SSH-EC-PK"
|
||||
description = "SSH (EC) private key"
|
||||
regex = '''-----BEGIN EC PRIVATE KEY-----'''
|
||||
|
||||
|
||||
[[rules]]
|
||||
id = "github-app-token"
|
||||
description = "GitHub App Token"
|
||||
regex = '''(ghu|ghs)_[0-9a-zA-Z]{36}'''
|
||||
|
||||
[[rules]]
|
||||
id = "github-refresh-token"
|
||||
description = "GitHub Refresh Token"
|
||||
regex = '''ghr_[0-9a-zA-Z]{76}'''
|
||||
|
||||
[[rules]]
|
||||
id = "shopify-shared-secret"
|
||||
description = "Shopify shared secret"
|
||||
regex = '''shpss_[a-fA-F0-9]{32}'''
|
||||
|
||||
[[rules]]
|
||||
id = "shopify-access-token"
|
||||
description = "Shopify access token"
|
||||
regex = '''shpat_[a-fA-F0-9]{32}'''
|
||||
|
||||
[[rules]]
|
||||
id = "shopify-custom-access-token"
|
||||
description = "Shopify custom app access token"
|
||||
regex = '''shpca_[a-fA-F0-9]{32}'''
|
||||
|
||||
[[rules]]
|
||||
id = "shopify-private-app-access-token"
|
||||
description = "Shopify private app access token"
|
||||
regex = '''shppa_[a-fA-F0-9]{32}'''
|
||||
|
||||
[[rules]]
|
||||
id = "slack-access-token"
|
||||
description = "Slack token"
|
||||
regex = '''xox[baprs]-([0-9a-zA-Z]{10,48})?'''
|
||||
|
||||
[[rules]]
|
||||
id = "stripe-access-token"
|
||||
description = "Stripe"
|
||||
regex = '''(?i)(sk|pk)_(test|live)_[0-9a-z]{10,32}'''
|
||||
|
||||
[[rules]]
|
||||
id = "pypi-upload-token"
|
||||
description = "PyPI upload token"
|
||||
regex = '''pypi-AgEIcHlwaS5vcmc[A-Za-z0-9\-_]{50,1000}'''
|
||||
|
||||
[[rules]]
|
||||
id = "gcp-service-account"
|
||||
description = "Google (GCP) Service-account"
|
||||
regex = '''\"type\": \"service_account\"'''
|
||||
|
||||
[[rules]]
|
||||
id = "heroku-api-key"
|
||||
description = "Heroku API Key"
|
||||
regex = ''' (?i)(heroku[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "slack-web-hook"
|
||||
description = "Slack Webhook"
|
||||
regex = '''https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8,12}/[a-zA-Z0-9_]{24}'''
|
||||
|
||||
[[rules]]
|
||||
id = "twilio-api-key"
|
||||
description = "Twilio API Key"
|
||||
regex = '''SK[0-9a-fA-F]{32}'''
|
||||
|
||||
[[rules]]
|
||||
id = "age-secret-key"
|
||||
description = "Age secret key"
|
||||
regex = '''AGE-SECRET-KEY-1[QPZRY9X8GF2TVDW0S3JN54KHCE6MUA7L]{58}'''
|
||||
|
||||
[[rules]]
|
||||
id = "facebook-token"
|
||||
description = "Facebook token"
|
||||
regex = '''(?i)(facebook[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "twitter-token"
|
||||
description = "Twitter token"
|
||||
regex = '''(?i)(twitter[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{35,44})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "adobe-client-id"
|
||||
description = "Adobe Client ID (Oauth Web)"
|
||||
regex = '''(?i)(adobe[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "adobe-client-secret"
|
||||
description = "Adobe Client Secret"
|
||||
regex = '''(p8e-)(?i)[a-z0-9]{32}'''
|
||||
|
||||
[[rules]]
|
||||
id = "alibaba-access-key-id"
|
||||
description = "Alibaba AccessKey ID"
|
||||
regex = '''(LTAI)(?i)[a-z0-9]{20}'''
|
||||
|
||||
[[rules]]
|
||||
id = "alibaba-secret-key"
|
||||
description = "Alibaba Secret Key"
|
||||
regex = '''(?i)(alibaba[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{30})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "asana-client-id"
|
||||
description = "Asana Client ID"
|
||||
regex = '''(?i)(asana[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9]{16})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "asana-client-secret"
|
||||
description = "Asana Client Secret"
|
||||
regex = '''(?i)(asana[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "atlassian-api-token"
|
||||
description = "Atlassian API token"
|
||||
regex = '''(?i)(atlassian[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{24})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "bitbucket-client-id"
|
||||
description = "Bitbucket client ID"
|
||||
regex = '''(?i)(bitbucket[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "bitbucket-client-secret"
|
||||
description = "Bitbucket client secret"
|
||||
regex = '''(?i)(bitbucket[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9_\-]{64})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "beamer-api-token"
|
||||
description = "Beamer API token"
|
||||
regex = '''(?i)(beamer[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](b_[a-z0-9=_\-]{44})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "clojars-api-token"
|
||||
description = "Clojars API token"
|
||||
regex = '''(CLOJARS_)(?i)[a-z0-9]{60}'''
|
||||
|
||||
[[rules]]
|
||||
id = "contentful-delivery-api-token"
|
||||
description = "Contentful delivery API token"
|
||||
regex = '''(?i)(contentful[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{43})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "databricks-api-token"
|
||||
description = "Databricks API token"
|
||||
regex = '''dapi[a-h0-9]{32}'''
|
||||
|
||||
[[rules]]
|
||||
id = "discord-api-token"
|
||||
description = "Discord API key"
|
||||
regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{64})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "discord-client-id"
|
||||
description = "Discord client ID"
|
||||
regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9]{18})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "discord-client-secret"
|
||||
description = "Discord client secret"
|
||||
regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_\-]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "doppler-api-token"
|
||||
description = "Doppler API token"
|
||||
regex = '''['\"](dp\.pt\.)(?i)[a-z0-9]{43}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "dropbox-api-secret"
|
||||
description = "Dropbox API secret/key"
|
||||
regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{15})['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "dropbox--api-key"
|
||||
description = "Dropbox API secret/key"
|
||||
regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{15})['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "dropbox-short-lived-api-token"
|
||||
description = "Dropbox short lived API token"
|
||||
regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](sl\.[a-z0-9\-=_]{135})['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "dropbox-long-lived-api-token"
|
||||
description = "Dropbox long lived API token"
|
||||
regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"][a-z0-9]{11}(AAAAAAAAAA)[a-z0-9\-_=]{43}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "duffel-api-token"
|
||||
description = "Duffel API token"
|
||||
regex = '''['\"]duffel_(test|live)_(?i)[a-z0-9_-]{43}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "dynatrace-api-token"
|
||||
description = "Dynatrace API token"
|
||||
regex = '''['\"]dt0c01\.(?i)[a-z0-9]{24}\.[a-z0-9]{64}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "easypost-api-token"
|
||||
description = "EasyPost API token"
|
||||
regex = '''['\"]EZAK(?i)[a-z0-9]{54}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "easypost-test-api-token"
|
||||
description = "EasyPost test API token"
|
||||
regex = '''['\"]EZTK(?i)[a-z0-9]{54}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "fastly-api-token"
|
||||
description = "Fastly API token"
|
||||
regex = '''(?i)(fastly[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "finicity-client-secret"
|
||||
description = "Finicity client secret"
|
||||
regex = '''(?i)(finicity[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{20})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "finicity-api-token"
|
||||
description = "Finicity API token"
|
||||
regex = '''(?i)(finicity[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "flutterwave-public-key"
|
||||
description = "Flutterwave public key"
|
||||
regex = '''FLWPUBK_TEST-(?i)[a-h0-9]{32}-X'''
|
||||
|
||||
[[rules]]
|
||||
id = "flutterwave-secret-key"
|
||||
description = "Flutterwave secret key"
|
||||
regex = '''FLWSECK_TEST-(?i)[a-h0-9]{32}-X'''
|
||||
|
||||
[[rules]]
|
||||
id = "flutterwave-enc-key"
|
||||
description = "Flutterwave encrypted key"
|
||||
regex = '''FLWSECK_TEST[a-h0-9]{12}'''
|
||||
|
||||
[[rules]]
|
||||
id = "frameio-api-token"
|
||||
description = "Frame.io API token"
|
||||
regex = '''fio-u-(?i)[a-z0-9\-_=]{64}'''
|
||||
|
||||
[[rules]]
|
||||
id = "gocardless-api-token"
|
||||
description = "GoCardless API token"
|
||||
regex = '''['\"]live_(?i)[a-z0-9\-_=]{40}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "grafana-api-token"
|
||||
description = "Grafana API token"
|
||||
regex = '''['\"]eyJrIjoi(?i)[a-z0-9\-_=]{72,92}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "hashicorp-tf-api-token"
|
||||
description = "HashiCorp Terraform user/org API token"
|
||||
regex = '''['\"](?i)[a-z0-9]{14}\.atlasv1\.[a-z0-9\-_=]{60,70}['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "hubspot-api-token"
|
||||
description = "HubSpot API token"
|
||||
regex = '''(?i)(hubspot[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "intercom-api-token"
|
||||
description = "Intercom API token"
|
||||
regex = '''(?i)(intercom[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_]{60})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "intercom-client-secret"
|
||||
description = "Intercom client secret/ID"
|
||||
regex = '''(?i)(intercom[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "ionic-api-token"
|
||||
description = "Ionic API token"
|
||||
regex = '''(?i)(ionic[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](ion_[a-z0-9]{42})['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "linear-api-token"
|
||||
description = "Linear API token"
|
||||
regex = '''lin_api_(?i)[a-z0-9]{40}'''
|
||||
|
||||
[[rules]]
|
||||
id = "linear-client-secret"
|
||||
description = "Linear client secret/ID"
|
||||
regex = '''(?i)(linear[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "lob-api-key"
|
||||
description = "Lob API Key"
|
||||
regex = '''(?i)(lob[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]((live|test)_[a-f0-9]{35})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "lob-pub-api-key"
|
||||
description = "Lob Publishable API Key"
|
||||
regex = '''(?i)(lob[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]((test|live)_pub_[a-f0-9]{31})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "mailchimp-api-key"
|
||||
description = "Mailchimp API key"
|
||||
regex = '''(?i)(mailchimp[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32}-us20)['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "mailgun-private-api-token"
|
||||
description = "Mailgun private API token"
|
||||
regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](key-[a-f0-9]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "mailgun-pub-key"
|
||||
description = "Mailgun public validation key"
|
||||
regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](pubkey-[a-f0-9]{32})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "mailgun-signing-key"
|
||||
description = "Mailgun webhook signing key"
|
||||
regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{32}-[a-h0-9]{8}-[a-h0-9]{8})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "mapbox-api-token"
|
||||
description = "Mapbox API token"
|
||||
regex = '''(?i)(pk\.[a-z0-9]{60}\.[a-z0-9]{22})'''
|
||||
|
||||
[[rules]]
|
||||
id = "messagebird-api-token"
|
||||
description = "MessageBird API token"
|
||||
regex = '''(?i)(messagebird[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{25})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "messagebird-client-id"
|
||||
description = "MessageBird API client ID"
|
||||
regex = '''(?i)(messagebird[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "new-relic-user-api-key"
|
||||
description = "New Relic user API Key"
|
||||
regex = '''['\"](NRAK-[A-Z0-9]{27})['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "new-relic-user-api-id"
|
||||
description = "New Relic user API ID"
|
||||
regex = '''(?i)(newrelic[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([A-Z0-9]{64})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "new-relic-browser-api-token"
|
||||
description = "New Relic ingest browser API token"
|
||||
regex = '''['\"](NRJS-[a-f0-9]{19})['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "npm-access-token"
|
||||
description = "npm access token"
|
||||
regex = '''['\"](npm_(?i)[a-z0-9]{36})['\"]'''
|
||||
|
||||
[[rules]]
|
||||
id = "planetscale-password"
|
||||
description = "PlanetScale password"
|
||||
regex = '''pscale_pw_(?i)[a-z0-9\-_\.]{43}'''
|
||||
|
||||
[[rules]]
|
||||
id = "planetscale-api-token"
|
||||
description = "PlanetScale API token"
|
||||
regex = '''pscale_tkn_(?i)[a-z0-9\-_\.]{43}'''
|
||||
|
||||
[[rules]]
|
||||
id = "postman-api-token"
|
||||
description = "Postman API token"
|
||||
regex = '''PMAK-(?i)[a-f0-9]{24}\-[a-f0-9]{34}'''
|
||||
|
||||
[[rules]]
|
||||
id = "pulumi-api-token"
|
||||
description = "Pulumi API token"
|
||||
regex = '''pul-[a-f0-9]{40}'''
|
||||
|
||||
[[rules]]
|
||||
id = "rubygems-api-token"
|
||||
description = "Rubygem API token"
|
||||
regex = '''rubygems_[a-f0-9]{48}'''
|
||||
|
||||
[[rules]]
|
||||
id = "sendgrid-api-token"
|
||||
description = "SendGrid API token"
|
||||
regex = '''SG\.(?i)[a-z0-9_\-\.]{66}'''
|
||||
|
||||
[[rules]]
|
||||
id = "sendinblue-api-token"
|
||||
description = "Sendinblue API token"
|
||||
regex = '''xkeysib-[a-f0-9]{64}\-(?i)[a-z0-9]{16}'''
|
||||
|
||||
[[rules]]
|
||||
id = "shippo-api-token"
|
||||
description = "Shippo API token"
|
||||
regex = '''shippo_(live|test)_[a-f0-9]{40}'''
|
||||
|
||||
[[rules]]
|
||||
id = "linkedin-client-secret"
|
||||
description = "LinkedIn Client secret"
|
||||
regex = '''(?i)(linkedin[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z]{16})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "linkedin-client-id"
|
||||
description = "LinkedIn Client ID"
|
||||
regex = '''(?i)(linkedin[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{14})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "twitch-api-token"
|
||||
description = "Twitch API token"
|
||||
regex = '''(?i)(twitch[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{30})['\"]'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "typeform-api-token"
|
||||
description = "Typeform API token"
|
||||
regex = '''(?i)(typeform[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}(tfp_[a-z0-9\-_\.=]{59})'''
|
||||
secretGroup = 3
|
||||
|
||||
[[rules]]
|
||||
id = "generic-api-key"
|
||||
description = "Generic API Key"
|
||||
regex = '''(?i)((key|api[^Version]|token|secret|password)[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9a-zA-Z\-_=]{8,64})['\"]'''
|
||||
entropy = 3.7
|
||||
secretGroup = 4
|
||||
|
||||
|
||||
[allowlist]
|
||||
description = "global allow lists"
|
||||
regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}''', '''RPM-GPG-KEY.*''', '''.*:.*StrelkaHexDump.*''', '''.*:.*PLACEHOLDER.*''', '''ssl_.*password''', '''integration_key\s=\s"so-logs-"''']
|
||||
paths = [
|
||||
'''gitleaks.toml''',
|
||||
'''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''',
|
||||
'''(go.mod|go.sum)$''',
|
||||
'''salt/nginx/files/enterprise-attack.json''',
|
||||
'''(.*?)whl$'''
|
||||
]
|
||||
5
.github/DISCUSSION_TEMPLATE/2-4.yml
vendored
5
.github/DISCUSSION_TEMPLATE/2-4.yml
vendored
@@ -2,13 +2,11 @@ body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
⚠️ This category is solely for conversations related to Security Onion 2.4 ⚠️
|
||||
|
||||
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Version
|
||||
description: Which version of Security Onion 2.4.x are you asking about?
|
||||
description: Which version of Security Onion are you asking about?
|
||||
options:
|
||||
-
|
||||
- 2.4.10
|
||||
@@ -35,6 +33,7 @@ body:
|
||||
- 2.4.200
|
||||
- 2.4.201
|
||||
- 2.4.210
|
||||
- 2.4.211
|
||||
- Other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
|
||||
178
.github/DISCUSSION_TEMPLATE/3-0.yml
vendored
Normal file
178
.github/DISCUSSION_TEMPLATE/3-0.yml
vendored
Normal file
@@ -0,0 +1,178 @@
|
||||
body:
|
||||
- type: markdown
|
||||
attributes:
|
||||
value: |
|
||||
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Version
|
||||
description: Which version of Security Onion are you asking about?
|
||||
options:
|
||||
-
|
||||
- 3.0.0
|
||||
- 3.1.0
|
||||
- Other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Installation Method
|
||||
description: How did you install Security Onion?
|
||||
options:
|
||||
-
|
||||
- Security Onion ISO image
|
||||
- Cloud image (Amazon, Azure, Google)
|
||||
- Network installation on Oracle 9 (unsupported)
|
||||
- Other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Description
|
||||
description: >
|
||||
Is this discussion about installation, configuration, upgrading, or other?
|
||||
options:
|
||||
-
|
||||
- installation
|
||||
- configuration
|
||||
- upgrading
|
||||
- other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Installation Type
|
||||
description: >
|
||||
When you installed, did you choose Import, Eval, Standalone, Distributed, or something else?
|
||||
options:
|
||||
-
|
||||
- Import
|
||||
- Eval
|
||||
- Standalone
|
||||
- Distributed
|
||||
- other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Location
|
||||
description: >
|
||||
Is this deployment in the cloud, on-prem with Internet access, or airgap?
|
||||
options:
|
||||
-
|
||||
- cloud
|
||||
- on-prem with Internet access
|
||||
- airgap
|
||||
- other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Hardware Specs
|
||||
description: >
|
||||
Does your hardware meet or exceed the minimum requirements for your installation type as shown at https://securityonion.net/docs/hardware?
|
||||
options:
|
||||
-
|
||||
- Meets minimum requirements
|
||||
- Exceeds minimum requirements
|
||||
- Does not meet minimum requirements
|
||||
- other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: CPU
|
||||
description: How many CPU cores do you have?
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: RAM
|
||||
description: How much RAM do you have?
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: Storage for /
|
||||
description: How much storage do you have for the / partition?
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
attributes:
|
||||
label: Storage for /nsm
|
||||
description: How much storage do you have for the /nsm partition?
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Network Traffic Collection
|
||||
description: >
|
||||
Are you collecting network traffic from a tap or span port?
|
||||
options:
|
||||
-
|
||||
- tap
|
||||
- span port
|
||||
- other (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Network Traffic Speeds
|
||||
description: >
|
||||
How much network traffic are you monitoring?
|
||||
options:
|
||||
-
|
||||
- Less than 1Gbps
|
||||
- 1Gbps to 10Gbps
|
||||
- more than 10Gbps
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Status
|
||||
description: >
|
||||
Does SOC Grid show all services on all nodes as running OK?
|
||||
options:
|
||||
-
|
||||
- Yes, all services on all nodes are running OK
|
||||
- No, one or more services are failed (please provide detail below)
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Salt Status
|
||||
description: >
|
||||
Do you get any failures when you run "sudo salt-call state.highstate"?
|
||||
options:
|
||||
-
|
||||
- Yes, there are salt failures (please provide detail below)
|
||||
- No, there are no failures
|
||||
validations:
|
||||
required: true
|
||||
- type: dropdown
|
||||
attributes:
|
||||
label: Logs
|
||||
description: >
|
||||
Are there any additional clues in /opt/so/log/?
|
||||
options:
|
||||
-
|
||||
- Yes, there are additional clues in /opt/so/log/ (please provide detail below)
|
||||
- No, there are no additional clues
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
||||
attributes:
|
||||
label: Detail
|
||||
description: Please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and then provide detailed information to help us help you.
|
||||
placeholder: |-
|
||||
STOP! Before typing, please read our discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 in their entirety!
|
||||
|
||||
If your organization needs more immediate, enterprise grade professional support, with one-on-one virtual meetings and screensharing, contact us via our website: https://securityonion.com/support
|
||||
validations:
|
||||
required: true
|
||||
- type: checkboxes
|
||||
attributes:
|
||||
label: Guidelines
|
||||
options:
|
||||
- label: I have read the discussion guidelines at https://github.com/Security-Onion-Solutions/securityonion/discussions/1720 and assert that I have followed the guidelines.
|
||||
required: true
|
||||
22
.github/pull_request_template.md
vendored
Normal file
22
.github/pull_request_template.md
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
## Description
|
||||
|
||||
<!--
|
||||
Explain the purpose of the pull request. Be brief or detailed depending on the scope of the changes.
|
||||
-->
|
||||
|
||||
## Related Issues
|
||||
|
||||
<!--
|
||||
Optionally, list any related issues that this pull request addresses.
|
||||
-->
|
||||
|
||||
## Checklist
|
||||
|
||||
- [ ] I have read and followed the [CONTRIBUTING.md](https://github.com/Security-Onion-Solutions/securityonion/blob/3/main/CONTRIBUTING.md) file.
|
||||
- [ ] I have read and agree to the terms of the [Contributor License Agreement](https://securityonionsolutions.com/cla)
|
||||
|
||||
## Questions or Comments
|
||||
|
||||
<!--
|
||||
If you have any questions or comments about this pull request, add them here.
|
||||
-->
|
||||
24
.github/workflows/contrib.yml
vendored
24
.github/workflows/contrib.yml
vendored
@@ -1,24 +0,0 @@
|
||||
name: contrib
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
pull_request_target:
|
||||
types: [opened,closed,synchronize]
|
||||
|
||||
jobs:
|
||||
CLAssistant:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: "Contributor Check"
|
||||
if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the CLA Document and I hereby sign the CLA') || github.event_name == 'pull_request_target'
|
||||
uses: cla-assistant/github-action@v2.3.1
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
PERSONAL_ACCESS_TOKEN : ${{ secrets.PERSONAL_ACCESS_TOKEN }}
|
||||
with:
|
||||
path-to-signatures: 'signatures_v1.json'
|
||||
path-to-document: 'https://securityonionsolutions.com/cla'
|
||||
allowlist: dependabot[bot],jertel,dougburks,TOoSmOotH,defensivedepth,m0duspwnens
|
||||
remote-organization-name: Security-Onion-Solutions
|
||||
remote-repository-name: licensing
|
||||
|
||||
17
.github/workflows/leaktest.yml
vendored
17
.github/workflows/leaktest.yml
vendored
@@ -1,17 +0,0 @@
|
||||
name: leak-test
|
||||
|
||||
on: [pull_request]
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: '0'
|
||||
|
||||
- name: Gitleaks
|
||||
uses: gitleaks/gitleaks-action@v1.6.0
|
||||
with:
|
||||
config-path: .github/.gitleaks.toml
|
||||
2
.github/workflows/pythontest.yml
vendored
2
.github/workflows/pythontest.yml
vendored
@@ -13,7 +13,7 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
python-version: ["3.13"]
|
||||
python-version: ["3.14"]
|
||||
python-code-path: ["salt/sensoroni/files/analyzers", "salt/manager/tools/sbin"]
|
||||
|
||||
steps:
|
||||
|
||||
@@ -23,7 +23,7 @@
|
||||
|
||||
* Link the PR to the related issue, either using [keywords](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-issues/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) in the PR description, or [manually](https://docs.github.com/en/issues/tracking-your-work-with-issues/creating-issues/linking-a-pull-request-to-an-issue#manually-linking-a-pull-request-to-an-issue).
|
||||
|
||||
* **Pull requests should be opened against the `dev` branch of this repo**, and should clearly describe the problem and solution.
|
||||
* **Pull requests should be opened against the current `?/dev` branch of this repo**, and should clearly describe the problem and solution.
|
||||
|
||||
* Be sure you have tested your changes and are confident they will not break other parts of the product.
|
||||
|
||||
|
||||
@@ -1,46 +1,46 @@
|
||||
### 2.4.210-20260302 ISO image released on 2026/03/02
|
||||
### 3.0.0-20260331 ISO image released on 2026/03/31
|
||||
|
||||
|
||||
### Download and Verify
|
||||
|
||||
2.4.210-20260302 ISO image:
|
||||
https://download.securityonion.net/file/securityonion/securityonion-2.4.210-20260302.iso
|
||||
3.0.0-20260331 ISO image:
|
||||
https://download.securityonion.net/file/securityonion/securityonion-3.0.0-20260331.iso
|
||||
|
||||
MD5: 575F316981891EBED2EE4E1F42A1F016
|
||||
SHA1: 600945E8823221CBC5F1C056084A71355308227E
|
||||
SHA256: A6AA6471125F07FA6E2796430E94BEAFDEF728E833E9728FDFA7106351EBC47E
|
||||
MD5: ECD318A1662A6FDE0EF213F5A9BD4B07
|
||||
SHA1: E55BE314440CCF3392DC0B06BC5E270B43176D9C
|
||||
SHA256: 7FC47405E335CBE5C2B6C51FE7AC60248F35CBE504907B8B5A33822B23F8F4D5
|
||||
|
||||
Signature for ISO image:
|
||||
https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.210-20260302.iso.sig
|
||||
https://github.com/Security-Onion-Solutions/securityonion/raw/3/main/sigs/securityonion-3.0.0-20260331.iso.sig
|
||||
|
||||
Signing key:
|
||||
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS
|
||||
https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/3/main/KEYS
|
||||
|
||||
For example, here are the steps you can use on most Linux distributions to download and verify our Security Onion ISO image.
|
||||
|
||||
Download and import the signing key:
|
||||
```
|
||||
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS -O - | gpg --import -
|
||||
wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/3/main/KEYS -O - | gpg --import -
|
||||
```
|
||||
|
||||
Download the signature file for the ISO:
|
||||
```
|
||||
wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.210-20260302.iso.sig
|
||||
wget https://github.com/Security-Onion-Solutions/securityonion/raw/3/main/sigs/securityonion-3.0.0-20260331.iso.sig
|
||||
```
|
||||
|
||||
Download the ISO image:
|
||||
```
|
||||
wget https://download.securityonion.net/file/securityonion/securityonion-2.4.210-20260302.iso
|
||||
wget https://download.securityonion.net/file/securityonion/securityonion-3.0.0-20260331.iso
|
||||
```
|
||||
|
||||
Verify the downloaded ISO image using the signature file:
|
||||
```
|
||||
gpg --verify securityonion-2.4.210-20260302.iso.sig securityonion-2.4.210-20260302.iso
|
||||
gpg --verify securityonion-3.0.0-20260331.iso.sig securityonion-3.0.0-20260331.iso
|
||||
```
|
||||
|
||||
The output should show "Good signature" and the Primary key fingerprint should match what's shown below:
|
||||
```
|
||||
gpg: Signature made Mon 02 Mar 2026 11:55:24 AM EST using RSA key ID FE507013
|
||||
gpg: Signature made Mon 30 Mar 2026 06:22:14 PM EDT using RSA key ID FE507013
|
||||
gpg: Good signature from "Security Onion Solutions, LLC <info@securityonionsolutions.com>"
|
||||
gpg: WARNING: This key is not certified with a trusted signature!
|
||||
gpg: There is no indication that the signature belongs to the owner.
|
||||
|
||||
66
README.md
66
README.md
@@ -1,50 +1,58 @@
|
||||
## Security Onion 2.4
|
||||
<p align="center">
|
||||
<img src="https://securityonionsolutions.com/logo/logo-so-onion-dark.svg" width="400" alt="Security Onion Logo">
|
||||
</p>
|
||||
|
||||
Security Onion 2.4 is here!
|
||||
# Security Onion
|
||||
|
||||
## Screenshots
|
||||
Security Onion is a free and open Linux distribution for threat hunting, enterprise security monitoring, and log management. It includes a comprehensive suite of tools designed to work together to provide visibility into your network and host activity.
|
||||
|
||||
Alerts
|
||||

|
||||
## ✨ Features
|
||||
|
||||
Dashboards
|
||||

|
||||
Security Onion includes everything you need to monitor your network and host systems:
|
||||
|
||||
Hunt
|
||||

|
||||
* **Security Onion Console (SOC)**: A unified web interface for analyzing security events and managing your grid.
|
||||
* **Elastic Stack**: Powerful search backed by Elasticsearch.
|
||||
* **Intrusion Detection**: Network-based IDS with Suricata and host-based monitoring with Elastic Fleet.
|
||||
* **Network Metadata**: Detailed network metadata generated by Zeek or Suricata.
|
||||
* **Full Packet Capture**: Retain and analyze raw network traffic with Suricata PCAP.
|
||||
|
||||
Detections
|
||||

|
||||
## ⭐ Security Onion Pro
|
||||
|
||||
PCAP
|
||||

|
||||
For organizations and enterprises requiring advanced capabilities, **Security Onion Pro** offers additional features designed for scale and efficiency:
|
||||
|
||||
Grid
|
||||

|
||||
* **Onion AI**: Leverage powerful AI-driven insights to accelerate your analysis and investigations.
|
||||
* **Enterprise Features**: Enhanced tools and integrations tailored for enterprise-grade security operations.
|
||||
|
||||
Config
|
||||

|
||||
For more information, visit the [Security Onion Pro](https://securityonionsolutions.com/pro) page.
|
||||
|
||||
### Release Notes
|
||||
## ☁️ Cloud Deployment
|
||||
|
||||
https://securityonion.net/docs/release-notes
|
||||
Security Onion is available and ready to deploy in the **AWS**, **Azure**, and **Google Cloud (GCP)** marketplaces.
|
||||
|
||||
### Requirements
|
||||
## 🚀 Getting Started
|
||||
|
||||
https://securityonion.net/docs/hardware
|
||||
| Goal | Resource |
|
||||
| :--- | :--- |
|
||||
| **Download** | [Security Onion ISO](https://securityonion.net/docs/download) |
|
||||
| **Requirements** | [Hardware Guide](https://securityonion.net/docs/hardware) |
|
||||
| **Install** | [Installation Instructions](https://securityonion.net/docs/installation) |
|
||||
| **What's New** | [Release Notes](https://securityonion.net/docs/release-notes) |
|
||||
|
||||
### Download
|
||||
## 📖 Documentation & Support
|
||||
|
||||
https://securityonion.net/docs/download
|
||||
For more detailed information, please visit our [Documentation](https://docs.securityonion.net).
|
||||
|
||||
### Installation
|
||||
* **FAQ**: [Frequently Asked Questions](https://securityonion.net/docs/faq)
|
||||
* **Community**: [Discussions & Support](https://securityonion.net/docs/community-support)
|
||||
* **Training**: [Official Training](https://securityonion.net/training)
|
||||
|
||||
https://securityonion.net/docs/installation
|
||||
## 🤝 Contributing
|
||||
|
||||
### FAQ
|
||||
We welcome contributions! Please see our [CONTRIBUTING.md](CONTRIBUTING.md) for guidelines on how to get involved.
|
||||
|
||||
https://securityonion.net/docs/faq
|
||||
## 🛡️ License
|
||||
|
||||
### Feedback
|
||||
Security Onion is licensed under the terms of the license found in the [LICENSE](LICENSE) file.
|
||||
|
||||
https://securityonion.net/docs/community-support
|
||||
---
|
||||
*Built with 🧅 by Security Onion Solutions.*
|
||||
|
||||
@@ -4,6 +4,7 @@
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 3.x | :white_check_mark: |
|
||||
| 2.4.x | :white_check_mark: |
|
||||
| 2.3.x | :x: |
|
||||
| 16.04.x | :x: |
|
||||
|
||||
@@ -87,8 +87,6 @@ base:
|
||||
- zeek.adv_zeek
|
||||
- bpf.soc_bpf
|
||||
- bpf.adv_bpf
|
||||
- pcap.soc_pcap
|
||||
- pcap.adv_pcap
|
||||
- suricata.soc_suricata
|
||||
- suricata.adv_suricata
|
||||
- minions.{{ grains.id }}
|
||||
@@ -134,8 +132,6 @@ base:
|
||||
- zeek.adv_zeek
|
||||
- bpf.soc_bpf
|
||||
- bpf.adv_bpf
|
||||
- pcap.soc_pcap
|
||||
- pcap.adv_pcap
|
||||
- suricata.soc_suricata
|
||||
- suricata.adv_suricata
|
||||
- minions.{{ grains.id }}
|
||||
@@ -185,8 +181,6 @@ base:
|
||||
- zeek.adv_zeek
|
||||
- bpf.soc_bpf
|
||||
- bpf.adv_bpf
|
||||
- pcap.soc_pcap
|
||||
- pcap.adv_pcap
|
||||
- suricata.soc_suricata
|
||||
- suricata.adv_suricata
|
||||
- minions.{{ grains.id }}
|
||||
@@ -209,8 +203,6 @@ base:
|
||||
- zeek.adv_zeek
|
||||
- bpf.soc_bpf
|
||||
- bpf.adv_bpf
|
||||
- pcap.soc_pcap
|
||||
- pcap.adv_pcap
|
||||
- suricata.soc_suricata
|
||||
- suricata.adv_suricata
|
||||
- strelka.soc_strelka
|
||||
@@ -297,8 +289,6 @@ base:
|
||||
- zeek.adv_zeek
|
||||
- bpf.soc_bpf
|
||||
- bpf.adv_bpf
|
||||
- pcap.soc_pcap
|
||||
- pcap.adv_pcap
|
||||
- suricata.soc_suricata
|
||||
- suricata.adv_suricata
|
||||
- strelka.soc_strelka
|
||||
|
||||
@@ -1,24 +1,14 @@
|
||||
from os import path
import subprocess


def check():
    """Report whether this host needs a reboot.

    Returns the string 'True' if a reboot is required, 'False' if not,
    or an 'Unsupported OS: <family>' message for unrecognized OS
    families.  Relies on the Salt-injected ``__grains__`` dict for the
    'os_family' value.
    """
    osfam = __grains__['os_family']
    retval = 'False'

    if osfam == 'Debian':
        # Debian-family hosts create this flag file when a package
        # update requires a reboot.
        if path.exists('/var/run/reboot-required'):
            retval = 'True'
    elif osfam == 'RedHat':
        # needs-restarting (yum-utils) exits non-zero when a reboot is
        # required; run it only on RedHat-family hosts so that `cmd` is
        # never referenced before assignment on other families.
        cmd = 'needs-restarting -r > /dev/null 2>&1'
        try:
            subprocess.check_call(cmd, shell=True)
        except subprocess.CalledProcessError:
            retval = 'True'
    else:
        # Fix: the original formatted this message with the undefined
        # name `os` instead of the `osfam` grain value.
        retval = 'Unsupported OS: %s' % osfam

    return retval
|
||||
|
||||
@@ -38,7 +38,6 @@
|
||||
] %}
|
||||
|
||||
{% set sensor_states = [
|
||||
'pcap',
|
||||
'suricata',
|
||||
'healthcheck',
|
||||
'tcpreplay',
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
backup:
|
||||
locations:
|
||||
description: List of locations to back up to the destination.
|
||||
helpLink: backup.html
|
||||
helpLink: backup
|
||||
global: True
|
||||
destination:
|
||||
description: Directory to store the configuration backups in.
|
||||
helpLink: backup.html
|
||||
helpLink: backup
|
||||
global: True
|
||||
|
||||
@@ -1,21 +1,15 @@
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% set PCAP_BPF_STATUS = 0 %}
|
||||
{% set STENO_BPF_COMPILED = "" %}
|
||||
|
||||
{% if GLOBALS.pcap_engine == "TRANSITION" %}
|
||||
{% set PCAPBPF = ["ip and host 255.255.255.1 and port 1"] %}
|
||||
{% else %}
|
||||
{% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %}
|
||||
{% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %}
|
||||
{% import 'bpf/macros.jinja' as MACROS %}
|
||||
{{ MACROS.remove_comments(BPFMERGED, 'pcap') }}
|
||||
{% set PCAPBPF = BPFMERGED.pcap %}
|
||||
{% endif %}
|
||||
|
||||
{% if PCAPBPF %}
|
||||
{% set PCAP_BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %}
|
||||
{% if PCAP_BPF_CALC['retcode'] == 0 %}
|
||||
{% set PCAP_BPF_STATUS = 1 %}
|
||||
{% set STENO_BPF_COMPILED = ",\\\"--filter=" + PCAP_BPF_CALC['stdout'] + "\\\"" %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
@@ -3,14 +3,14 @@ bpf:
|
||||
description: List of BPF filters to apply to the PCAP engine.
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: bpf.html
|
||||
helpLink: bpf
|
||||
suricata:
|
||||
description: List of BPF filters to apply to Suricata. This will apply to alerts and, if enabled, to metadata and PCAP logs generated by Suricata.
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: bpf.html
|
||||
helpLink: bpf
|
||||
zeek:
|
||||
description: List of BPF filters to apply to Zeek.
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: bpf.html
|
||||
helpLink: bpf
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
include:
|
||||
- docker
|
||||
|
||||
@@ -18,9 +16,3 @@ trusttheca:
|
||||
- show_changes: False
|
||||
- makedirs: True
|
||||
|
||||
{% if GLOBALS.os_family == 'Debian' %}
|
||||
symlinkca:
|
||||
file.symlink:
|
||||
- target: /etc/pki/tls/certs/intca.crt
|
||||
- name: /etc/ssl/certs/intca.crt
|
||||
{% endif %}
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
{
|
||||
"registry-mirrors": [
|
||||
"https://:5000"
|
||||
],
|
||||
"bip": "172.17.0.1/24",
|
||||
"default-address-pools": [
|
||||
{
|
||||
"base": "172.17.0.0/24",
|
||||
"size": 24
|
||||
}
|
||||
]
|
||||
}
|
||||
@@ -20,11 +20,6 @@ kernel.printk:
|
||||
sysctl.present:
|
||||
- value: "3 4 1 3"
|
||||
|
||||
# Remove variables.txt from /tmp - This is temp
|
||||
rmvariablesfile:
|
||||
file.absent:
|
||||
- name: /tmp/variables.txt
|
||||
|
||||
# Add socore Group
|
||||
socoregroup:
|
||||
group.present:
|
||||
@@ -149,28 +144,6 @@ common_sbin_jinja:
|
||||
- so-import-pcap
|
||||
{% endif %}
|
||||
|
||||
{% if GLOBALS.role == 'so-heavynode' %}
|
||||
remove_so-pcap-import_heavynode:
|
||||
file.absent:
|
||||
- name: /usr/sbin/so-pcap-import
|
||||
|
||||
remove_so-import-pcap_heavynode:
|
||||
file.absent:
|
||||
- name: /usr/sbin/so-import-pcap
|
||||
{% endif %}
|
||||
|
||||
{% if not GLOBALS.is_manager%}
|
||||
# prior to 2.4.50 these scripts were in common/tools/sbin on the manager because of soup and distributed to non managers
|
||||
# these two states remove the scripts from non manager nodes
|
||||
remove_soup:
|
||||
file.absent:
|
||||
- name: /usr/sbin/soup
|
||||
|
||||
remove_so-firewall:
|
||||
file.absent:
|
||||
- name: /usr/sbin/so-firewall
|
||||
{% endif %}
|
||||
|
||||
so-status_script:
|
||||
file.managed:
|
||||
- name: /usr/sbin/so-status
|
||||
|
||||
@@ -1,52 +1,5 @@
|
||||
# we cannot import GLOBALS from vars/globals.map.jinja in this state since it is called in setup.virt.init
|
||||
# since it is early in setup of a new VM, the pillars imported in GLOBALS are not yet defined
|
||||
{% if grains.os_family == 'Debian' %}
|
||||
commonpkgs:
|
||||
pkg.installed:
|
||||
- skip_suggestions: True
|
||||
- pkgs:
|
||||
- apache2-utils
|
||||
- wget
|
||||
- ntpdate
|
||||
- jq
|
||||
- curl
|
||||
- ca-certificates
|
||||
- software-properties-common
|
||||
- apt-transport-https
|
||||
- openssl
|
||||
- netcat-openbsd
|
||||
- sqlite3
|
||||
- libssl-dev
|
||||
- procps
|
||||
- python3-dateutil
|
||||
- python3-docker
|
||||
- python3-packaging
|
||||
- python3-lxml
|
||||
- git
|
||||
- rsync
|
||||
- vim
|
||||
- tar
|
||||
- unzip
|
||||
- bc
|
||||
{% if grains.oscodename != 'focal' %}
|
||||
- python3-rich
|
||||
{% endif %}
|
||||
|
||||
{% if grains.oscodename == 'focal' %}
|
||||
# since Ubuntu requires an internet connection we can use pip to install modules
|
||||
python3-pip:
|
||||
pkg.installed
|
||||
|
||||
python-rich:
|
||||
pip.installed:
|
||||
- name: rich
|
||||
- target: /usr/local/lib/python3.8/dist-packages/
|
||||
- require:
|
||||
- pkg: python3-pip
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.os_family == 'RedHat' %}
|
||||
|
||||
remove_mariadb:
|
||||
pkg.removed:
|
||||
@@ -84,5 +37,3 @@ commonpkgs:
|
||||
- unzip
|
||||
- wget
|
||||
- yum-utils
|
||||
|
||||
{% endif %}
|
||||
|
||||
@@ -3,8 +3,6 @@
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% if '2.4' in salt['cp.get_file_str']('/etc/soversion') %}
|
||||
|
||||
{% import_yaml '/opt/so/saltstack/local/pillar/global/soc_global.sls' as SOC_GLOBAL %}
|
||||
{% if SOC_GLOBAL.global.airgap %}
|
||||
{% set UPDATE_DIR='/tmp/soagupdate/SecurityOnion' %}
|
||||
@@ -13,14 +11,6 @@
|
||||
{% endif %}
|
||||
{% set SOVERSION = salt['file.read']('/etc/soversion').strip() %}
|
||||
|
||||
remove_common_soup:
|
||||
file.absent:
|
||||
- name: /opt/so/saltstack/default/salt/common/tools/sbin/soup
|
||||
|
||||
remove_common_so-firewall:
|
||||
file.absent:
|
||||
- name: /opt/so/saltstack/default/salt/common/tools/sbin/so-firewall
|
||||
|
||||
# This section is used to put the scripts in place in the Salt file system
|
||||
# in case a state run tries to overwrite what we do in the next section.
|
||||
copy_so-common_common_tools_sbin:
|
||||
@@ -120,23 +110,3 @@ copy_bootstrap-salt_sbin:
|
||||
- source: {{UPDATE_DIR}}/salt/salt/scripts/bootstrap-salt.sh
|
||||
- force: True
|
||||
- preserve: True
|
||||
|
||||
{# this is added in 2.4.120 to remove salt repo files pointing to saltproject.io to accomodate the move to broadcom and new bootstrap-salt script #}
|
||||
{% if salt['pkg.version_cmp'](SOVERSION, '2.4.120') == -1 %}
|
||||
{% set saltrepofile = '/etc/yum.repos.d/salt.repo' %}
|
||||
{% if grains.os_family == 'Debian' %}
|
||||
{% set saltrepofile = '/etc/apt/sources.list.d/salt.list' %}
|
||||
{% endif %}
|
||||
remove_saltproject_io_repo_manager:
|
||||
file.absent:
|
||||
- name: {{ saltrepofile }}
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
fix_23_soup_sbin:
|
||||
cmd.run:
|
||||
- name: curl -s -f -o /usr/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
|
||||
fix_23_soup_salt:
|
||||
cmd.run:
|
||||
- name: curl -s -f -o /opt/so/saltstack/defalt/salt/common/tools/sbin/soup https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.3/main/salt/common/tools/sbin/soup
|
||||
{% endif %}
|
||||
|
||||
@@ -16,7 +16,7 @@
|
||||
|
||||
if [ "$#" -lt 2 ]; then
|
||||
cat 1>&2 <<EOF
|
||||
$0 compiles a BPF expression to be passed to stenotype to apply a socket filter.
|
||||
$0 compiles a BPF expression to be passed to PCAP to apply a socket filter.
|
||||
Its first argument is the interface (link type is required) and all other arguments
|
||||
are passed to TCPDump.
|
||||
|
||||
|
||||
@@ -333,8 +333,8 @@ get_elastic_agent_vars() {
|
||||
|
||||
if [ -f "$defaultsfile" ]; then
|
||||
ELASTIC_AGENT_TARBALL_VERSION=$(egrep " +version: " $defaultsfile | awk -F: '{print $2}' | tr -d '[:space:]')
|
||||
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
|
||||
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
|
||||
ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/3/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
|
||||
ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/3/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
|
||||
ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz"
|
||||
ELASTIC_AGENT_MD5="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5"
|
||||
ELASTIC_AGENT_EXPANSION_DIR=/nsm/elastic-fleet/artifacts/beats/elastic-agent
|
||||
@@ -349,21 +349,16 @@ get_random_value() {
|
||||
}
|
||||
|
||||
gpg_rpm_import() {
|
||||
if [[ $is_oracle ]]; then
|
||||
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
|
||||
local RPMKEYSLOC="../salt/repo/client/files/$OS/keys"
|
||||
else
|
||||
local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/$OS/keys"
|
||||
fi
|
||||
RPMKEYS=('RPM-GPG-KEY-oracle' 'RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
|
||||
for RPMKEY in "${RPMKEYS[@]}"; do
|
||||
rpm --import $RPMKEYSLOC/$RPMKEY
|
||||
echo "Imported $RPMKEY"
|
||||
done
|
||||
elif [[ $is_rpm ]]; then
|
||||
echo "Importing the security onion GPG key"
|
||||
rpm --import ../salt/repo/client/files/oracle/keys/securityonion.pub
|
||||
if [[ "$WHATWOULDYOUSAYYAHDOHERE" == "setup" ]]; then
|
||||
local RPMKEYSLOC="../salt/repo/client/files/$OS/keys"
|
||||
else
|
||||
local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/$OS/keys"
|
||||
fi
|
||||
RPMKEYS=('RPM-GPG-KEY-oracle' 'RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub')
|
||||
for RPMKEY in "${RPMKEYS[@]}"; do
|
||||
rpm --import $RPMKEYSLOC/$RPMKEY
|
||||
echo "Imported $RPMKEY"
|
||||
done
|
||||
}
|
||||
|
||||
header() {
|
||||
@@ -550,6 +545,22 @@ retry() {
|
||||
return $exitcode
|
||||
}
|
||||
|
||||
rollover_index() {
|
||||
idx=$1
|
||||
exists=$(so-elasticsearch-query $idx -o /dev/null -w "%{http_code}")
|
||||
if [[ $exists -eq 200 ]]; then
|
||||
rollover=$(so-elasticsearch-query $idx/_rollover -o /dev/null -w "%{http_code}" -XPOST)
|
||||
|
||||
if [[ $rollover -eq 200 ]]; then
|
||||
echo "Successfully triggered rollover for $idx..."
|
||||
else
|
||||
echo "Could not trigger rollover for $idx..."
|
||||
fi
|
||||
else
|
||||
echo "Could not find index $idx..."
|
||||
fi
|
||||
}
|
||||
|
||||
run_check_net_err() {
|
||||
local cmd=$1
|
||||
local err_msg=${2:-"Unknown error occured, please check /root/$WHATWOULDYOUSAYYAHDOHERE.log for details."} # Really need to rename that variable
|
||||
@@ -615,69 +626,19 @@ salt_minion_count() {
|
||||
}
|
||||
|
||||
set_os() {
|
||||
if [ -f /etc/redhat-release ]; then
|
||||
if grep -q "Rocky Linux release 9" /etc/redhat-release; then
|
||||
OS=rocky
|
||||
OSVER=9
|
||||
is_rocky=true
|
||||
is_rpm=true
|
||||
elif grep -q "CentOS Stream release 9" /etc/redhat-release; then
|
||||
OS=centos
|
||||
OSVER=9
|
||||
is_centos=true
|
||||
is_rpm=true
|
||||
elif grep -q "AlmaLinux release 9" /etc/redhat-release; then
|
||||
OS=alma
|
||||
OSVER=9
|
||||
is_alma=true
|
||||
is_rpm=true
|
||||
elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then
|
||||
if [ -f /etc/oracle-release ]; then
|
||||
OS=oracle
|
||||
OSVER=9
|
||||
is_oracle=true
|
||||
is_rpm=true
|
||||
else
|
||||
OS=rhel
|
||||
OSVER=9
|
||||
is_rhel=true
|
||||
is_rpm=true
|
||||
fi
|
||||
fi
|
||||
cron_service_name="crond"
|
||||
elif [ -f /etc/os-release ]; then
|
||||
if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then
|
||||
OSVER=focal
|
||||
UBVER=20.04
|
||||
OS=ubuntu
|
||||
is_ubuntu=true
|
||||
is_deb=true
|
||||
elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then
|
||||
OSVER=jammy
|
||||
UBVER=22.04
|
||||
OS=ubuntu
|
||||
is_ubuntu=true
|
||||
is_deb=true
|
||||
elif grep -q "VERSION_CODENAME=bookworm" /etc/os-release; then
|
||||
OSVER=bookworm
|
||||
DEBVER=12
|
||||
is_debian=true
|
||||
OS=debian
|
||||
is_deb=true
|
||||
fi
|
||||
cron_service_name="cron"
|
||||
if [ -f /etc/redhat-release ] && grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release && [ -f /etc/oracle-release ]; then
|
||||
OS=oracle
|
||||
OSVER=9
|
||||
is_oracle=true
|
||||
is_rpm=true
|
||||
fi
|
||||
cron_service_name="crond"
|
||||
}
|
||||
|
||||
set_minionid() {
|
||||
MINIONID=$(lookup_grain id)
|
||||
}
|
||||
|
||||
set_palette() {
|
||||
if [[ $is_deb ]]; then
|
||||
update-alternatives --set newt-palette /etc/newt/palette.original
|
||||
fi
|
||||
}
|
||||
|
||||
set_version() {
|
||||
CURRENTVERSION=0.0.0
|
||||
|
||||
@@ -32,7 +32,6 @@ container_list() {
|
||||
"so-nginx"
|
||||
"so-pcaptools"
|
||||
"so-soc"
|
||||
"so-steno"
|
||||
"so-suricata"
|
||||
"so-telegraf"
|
||||
"so-zeek"
|
||||
@@ -58,7 +57,6 @@ container_list() {
|
||||
"so-pcaptools"
|
||||
"so-redis"
|
||||
"so-soc"
|
||||
"so-steno"
|
||||
"so-strelka-backend"
|
||||
"so-strelka-manager"
|
||||
"so-suricata"
|
||||
@@ -71,7 +69,6 @@ container_list() {
|
||||
"so-logstash"
|
||||
"so-nginx"
|
||||
"so-redis"
|
||||
"so-steno"
|
||||
"so-suricata"
|
||||
"so-soc"
|
||||
"so-telegraf"
|
||||
|
||||
@@ -131,6 +131,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not configured for GeoIP" # SO does not bundle the maxminddb with Zeek
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|HTTP 404: Not Found" # Salt loops until Kratos returns 200, during startup Kratos may not be ready
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Cancelling deferred write event maybeFenceReplicas because the event queue is now closed" # Kafka controller log during shutdown/restart
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Redis may have been restarted" # Redis likely restarted by salt
|
||||
fi
|
||||
|
||||
if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then
|
||||
@@ -179,7 +180,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 placed a Jinja script in a non-jinja salt dir, causing cron output errors
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." # known ingest type collisions issue with earlier versions of SO
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider
|
||||
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error
|
||||
|
||||
@@ -55,19 +55,22 @@ if [ $SKIP -ne 1 ]; then
|
||||
fi
|
||||
|
||||
delete_pcap() {
|
||||
PCAP_DATA="/nsm/pcap/"
|
||||
[ -d $PCAP_DATA ] && so-pcap-stop && rm -rf $PCAP_DATA/* && so-pcap-start
|
||||
PCAP_DATA="/nsm/suripcap/"
|
||||
[ -d $PCAP_DATA ] && rm -rf $PCAP_DATA/*
|
||||
}
|
||||
delete_suricata() {
|
||||
SURI_LOG="/nsm/suricata/"
|
||||
[ -d $SURI_LOG ] && so-suricata-stop && rm -rf $SURI_LOG/* && so-suricata-start
|
||||
[ -d $SURI_LOG ] && rm -rf $SURI_LOG/*
|
||||
}
|
||||
delete_zeek() {
|
||||
ZEEK_LOG="/nsm/zeek/logs/"
|
||||
[ -d $ZEEK_LOG ] && so-zeek-stop && rm -rf $ZEEK_LOG/* && so-zeek-start
|
||||
}
|
||||
|
||||
so-suricata-stop
|
||||
delete_pcap
|
||||
delete_suricata
|
||||
delete_zeek
|
||||
so-suricata-start
|
||||
|
||||
|
||||
|
||||
@@ -23,7 +23,6 @@ if [ $# -ge 1 ]; then
|
||||
fi
|
||||
|
||||
case $1 in
|
||||
"steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;;
|
||||
"elastic-fleet") docker stop so-elastic-fleet && docker rm so-elastic-fleet && salt-call state.apply elasticfleet queue=True;;
|
||||
*) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;;
|
||||
esac
|
||||
|
||||
@@ -72,7 +72,7 @@ clean() {
|
||||
done
|
||||
fi
|
||||
|
||||
## Clean up extracted pcaps from Steno
|
||||
## Clean up extracted pcaps
|
||||
PCAPS='/nsm/pcapout'
|
||||
OLDEST_PCAP=$(find $PCAPS -type f -printf '%T+ %p\n' | sort -n | head -n 1)
|
||||
if [ -z "$OLDEST_PCAP" -o "$OLDEST_PCAP" == ".." -o "$OLDEST_PCAP" == "." ]; then
|
||||
|
||||
@@ -23,7 +23,6 @@ if [ $# -ge 1 ]; then
|
||||
|
||||
case $1 in
|
||||
"all") salt-call state.highstate queue=True;;
|
||||
"steno") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply pcap queue=True; fi ;;
|
||||
"elastic-fleet") if docker ps | grep -q so-$1; then printf "\n$1 is already running!\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply elasticfleet queue=True; fi ;;
|
||||
*) if docker ps | grep -E -q '^so-$1$'; then printf "\n$1 is already running\n\n"; else docker rm so-$1 >/dev/null 2>&1 ; salt-call state.apply $1 queue=True; fi ;;
|
||||
esac
|
||||
|
||||
@@ -1,34 +0,0 @@
|
||||
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
|
||||
# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
so-curator:
|
||||
docker_container.absent:
|
||||
- force: True
|
||||
|
||||
so-curator_so-status.disabled:
|
||||
file.line:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
- match: ^so-curator$
|
||||
- mode: delete
|
||||
|
||||
so-curator-cluster-close:
|
||||
cron.absent:
|
||||
- identifier: so-curator-cluster-close
|
||||
|
||||
so-curator-cluster-delete:
|
||||
cron.absent:
|
||||
- identifier: so-curator-cluster-delete
|
||||
|
||||
delete_curator_configuration:
|
||||
file.absent:
|
||||
- name: /opt/so/conf/curator
|
||||
- recurse: True
|
||||
|
||||
{% set files = salt.file.find(path='/usr/sbin', name='so-curator*') %}
|
||||
{% if files|length > 0 %}
|
||||
delete_curator_scripts:
|
||||
file.absent:
|
||||
- names: {{files|yaml}}
|
||||
{% endif %}
|
||||
@@ -1,6 +1,10 @@
|
||||
docker:
|
||||
range: '172.17.1.0/24'
|
||||
gateway: '172.17.1.1'
|
||||
ulimits:
|
||||
- name: nofile
|
||||
soft: 1048576
|
||||
hard: 1048576
|
||||
containers:
|
||||
'so-dockerregistry':
|
||||
final_octet: 20
|
||||
@@ -9,6 +13,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-elastic-fleet':
|
||||
final_octet: 21
|
||||
port_bindings:
|
||||
@@ -16,6 +21,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-elasticsearch':
|
||||
final_octet: 22
|
||||
port_bindings:
|
||||
@@ -24,6 +30,16 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits:
|
||||
- name: memlock
|
||||
soft: -1
|
||||
hard: -1
|
||||
- name: nofile
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
- name: nproc
|
||||
soft: 4096
|
||||
hard: 4096
|
||||
'so-influxdb':
|
||||
final_octet: 26
|
||||
port_bindings:
|
||||
@@ -31,6 +47,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-kibana':
|
||||
final_octet: 27
|
||||
port_bindings:
|
||||
@@ -38,6 +55,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-kratos':
|
||||
final_octet: 28
|
||||
port_bindings:
|
||||
@@ -46,6 +64,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-hydra':
|
||||
final_octet: 30
|
||||
port_bindings:
|
||||
@@ -54,6 +73,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-logstash':
|
||||
final_octet: 29
|
||||
port_bindings:
|
||||
@@ -70,6 +90,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-nginx':
|
||||
final_octet: 31
|
||||
port_bindings:
|
||||
@@ -81,6 +102,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-nginx-fleet-node':
|
||||
final_octet: 31
|
||||
port_bindings:
|
||||
@@ -88,6 +110,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-redis':
|
||||
final_octet: 33
|
||||
port_bindings:
|
||||
@@ -96,11 +119,13 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-sensoroni':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-soc':
|
||||
final_octet: 34
|
||||
port_bindings:
|
||||
@@ -108,16 +133,19 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-strelka-backend':
|
||||
final_octet: 36
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-strelka-filestream':
|
||||
final_octet: 37
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-strelka-frontend':
|
||||
final_octet: 38
|
||||
port_bindings:
|
||||
@@ -125,11 +153,13 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-strelka-manager':
|
||||
final_octet: 39
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-strelka-gatekeeper':
|
||||
final_octet: 40
|
||||
port_bindings:
|
||||
@@ -137,6 +167,7 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-strelka-coordinator':
|
||||
final_octet: 41
|
||||
port_bindings:
|
||||
@@ -144,11 +175,13 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-elastalert':
|
||||
final_octet: 42
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-elastic-fleet-package-registry':
|
||||
final_octet: 44
|
||||
port_bindings:
|
||||
@@ -156,11 +189,13 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-idh':
|
||||
final_octet: 45
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-elastic-agent':
|
||||
final_octet: 46
|
||||
port_bindings:
|
||||
@@ -169,28 +204,28 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-telegraf':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
'so-steno':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
'so-suricata':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits:
|
||||
- memlock=524288000
|
||||
ulimits: []
|
||||
'so-zeek':
|
||||
final_octet: 99
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits:
|
||||
- name: core
|
||||
soft: 0
|
||||
hard: 0
|
||||
'so-kafka':
|
||||
final_octet: 88
|
||||
port_bindings:
|
||||
@@ -201,3 +236,4 @@ docker:
|
||||
custom_bind_mounts: []
|
||||
extra_hosts: []
|
||||
extra_env: []
|
||||
ulimits: []
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
{% import_yaml 'docker/defaults.yaml' as DOCKERDEFAULTS %}
|
||||
{% set DOCKER = salt['pillar.get']('docker', DOCKERDEFAULTS.docker, merge=True) %}
|
||||
{% set RANGESPLIT = DOCKER.range.split('.') %}
|
||||
{% set DOCKERMERGED = salt['pillar.get']('docker', DOCKERDEFAULTS.docker, merge=True) %}
|
||||
{% set RANGESPLIT = DOCKERMERGED.range.split('.') %}
|
||||
{% set FIRSTTHREE = RANGESPLIT[0] ~ '.' ~ RANGESPLIT[1] ~ '.' ~ RANGESPLIT[2] ~ '.' %}
|
||||
|
||||
{% for container, vals in DOCKER.containers.items() %}
|
||||
{% do DOCKER.containers[container].update({'ip': FIRSTTHREE ~ DOCKER.containers[container].final_octet}) %}
|
||||
{% for container, vals in DOCKERMERGED.containers.items() %}
|
||||
{% do DOCKERMERGED.containers[container].update({'ip': FIRSTTHREE ~ DOCKERMERGED.containers[container].final_octet}) %}
|
||||
{% endfor %}
|
||||
|
||||
24
salt/docker/files/daemon.json.jinja
Normal file
24
salt/docker/files/daemon.json.jinja
Normal file
@@ -0,0 +1,24 @@
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED -%}
|
||||
{
|
||||
"registry-mirrors": [
|
||||
"https://:5000"
|
||||
],
|
||||
"bip": "172.17.0.1/24",
|
||||
"default-address-pools": [
|
||||
{
|
||||
"base": "172.17.0.0/24",
|
||||
"size": 24
|
||||
}
|
||||
]
|
||||
{%- if DOCKERMERGED.ulimits %},
|
||||
"default-ulimits": {
|
||||
{%- for ULIMIT in DOCKERMERGED.ulimits %}
|
||||
"{{ ULIMIT.name }}": {
|
||||
"Name": "{{ ULIMIT.name }}",
|
||||
"Soft": {{ ULIMIT.soft }},
|
||||
"Hard": {{ ULIMIT.hard }}
|
||||
}{{ "," if not loop.last else "" }}
|
||||
{%- endfor %}
|
||||
}
|
||||
{%- endif %}
|
||||
}
|
||||
@@ -3,7 +3,7 @@
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
# docker service requires the ca.crt
|
||||
@@ -15,39 +15,6 @@ dockergroup:
|
||||
- name: docker
|
||||
- gid: 920
|
||||
|
||||
{% if GLOBALS.os_family == 'Debian' %}
|
||||
{% if grains.oscodename == 'bookworm' %}
|
||||
dockerheldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
- containerd.io: 2.2.1-1~debian.12~bookworm
|
||||
- docker-ce: 5:29.2.1-1~debian.12~bookworm
|
||||
- docker-ce-cli: 5:29.2.1-1~debian.12~bookworm
|
||||
- docker-ce-rootless-extras: 5:29.2.1-1~debian.12~bookworm
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
{% elif grains.oscodename == 'jammy' %}
|
||||
dockerheldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
- containerd.io: 2.2.1-1~ubuntu.22.04~jammy
|
||||
- docker-ce: 5:29.2.1-1~ubuntu.22.04~jammy
|
||||
- docker-ce-cli: 5:29.2.1-1~ubuntu.22.04~jammy
|
||||
- docker-ce-rootless-extras: 5:29.2.1-1~ubuntu.22.04~jammy
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
{% else %}
|
||||
dockerheldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
- containerd.io: 1.7.21-1
|
||||
- docker-ce: 5:27.2.0-1~ubuntu.20.04~focal
|
||||
- docker-ce-cli: 5:27.2.0-1~ubuntu.20.04~focal
|
||||
- docker-ce-rootless-extras: 5:27.2.0-1~ubuntu.20.04~focal
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
{% endif %}
|
||||
{% else %}
|
||||
dockerheldpackages:
|
||||
pkg.installed:
|
||||
- pkgs:
|
||||
@@ -57,7 +24,6 @@ dockerheldpackages:
|
||||
- docker-ce-rootless-extras: 29.2.1-1.el9
|
||||
- hold: True
|
||||
- update_holds: True
|
||||
{% endif %}
|
||||
|
||||
#disable docker from managing iptables
|
||||
iptables_disabled:
|
||||
@@ -75,10 +41,9 @@ dockeretc:
|
||||
file.directory:
|
||||
- name: /etc/docker
|
||||
|
||||
# Manager daemon.json
|
||||
docker_daemon:
|
||||
file.managed:
|
||||
- source: salt://common/files/daemon.json
|
||||
- source: salt://docker/files/daemon.json.jinja
|
||||
- name: /etc/docker/daemon.json
|
||||
- template: jinja
|
||||
|
||||
@@ -109,8 +74,8 @@ dockerreserveports:
|
||||
sos_docker_net:
|
||||
docker_network.present:
|
||||
- name: sobridge
|
||||
- subnet: {{ DOCKER.range }}
|
||||
- gateway: {{ DOCKER.gateway }}
|
||||
- subnet: {{ DOCKERMERGED.range }}
|
||||
- gateway: {{ DOCKERMERGED.gateway }}
|
||||
- options:
|
||||
com.docker.network.bridge.name: 'sobridge'
|
||||
com.docker.network.driver.mtu: '1500'
|
||||
|
||||
@@ -1,44 +1,82 @@
|
||||
docker:
|
||||
gateway:
|
||||
description: Gateway for the default docker interface.
|
||||
helpLink: docker.html
|
||||
helpLink: docker
|
||||
advanced: True
|
||||
range:
|
||||
description: Default docker IP range for containers.
|
||||
helpLink: docker.html
|
||||
helpLink: docker
|
||||
advanced: True
|
||||
ulimits:
|
||||
description: |
|
||||
Default ulimit settings applied to all containers via the Docker daemon. Each entry specifies a resource name (e.g. nofile, memlock, core, nproc) with soft and hard limits. Individual container ulimits override these defaults. Valid resource names include: cpu, fsize, data, stack, core, rss, nproc, nofile, memlock, as, locks, sigpending, msgqueue, nice, rtprio, rttime.
|
||||
forcedType: "[]{}"
|
||||
syntax: json
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
uiElements:
|
||||
- field: name
|
||||
label: Resource Name
|
||||
required: True
|
||||
regex: ^(cpu|fsize|data|stack|core|rss|nproc|nofile|memlock|as|locks|sigpending|msgqueue|nice|rtprio|rttime)$
|
||||
regexFailureMessage: You must enter a valid ulimit name (cpu, fsize, data, stack, core, rss, nproc, nofile, memlock, as, locks, sigpending, msgqueue, nice, rtprio, rttime).
|
||||
- field: soft
|
||||
label: Soft Limit
|
||||
forcedType: int
|
||||
- field: hard
|
||||
label: Hard Limit
|
||||
forcedType: int
|
||||
containers:
|
||||
so-dockerregistry: &dockerOptions
|
||||
final_octet:
|
||||
description: Last octet of the container IP address.
|
||||
helpLink: docker.html
|
||||
helpLink: docker
|
||||
readonly: True
|
||||
advanced: True
|
||||
global: True
|
||||
port_bindings:
|
||||
description: List of port bindings for the container.
|
||||
helpLink: docker.html
|
||||
helpLink: docker
|
||||
advanced: True
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
custom_bind_mounts:
|
||||
description: List of custom local volume bindings.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
helpLink: docker
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
extra_hosts:
|
||||
description: List of additional host entries for the container.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
helpLink: docker
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
extra_env:
|
||||
description: List of additional ENV entries for the container.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
helpLink: docker
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
ulimits:
|
||||
description: |
|
||||
Ulimit settings for the container. Each entry specifies a resource name (e.g. nofile, memlock, core, nproc) with optional soft and hard limits. Valid resource names include: cpu, fsize, data, stack, core, rss, nproc, nofile, memlock, as, locks, sigpending, msgqueue, nice, rtprio, rttime.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
forcedType: "[]{}"
|
||||
syntax: json
|
||||
uiElements:
|
||||
- field: name
|
||||
label: Resource Name
|
||||
required: True
|
||||
regex: ^(cpu|fsize|data|stack|core|rss|nproc|nofile|memlock|as|locks|sigpending|msgqueue|nice|rtprio|rttime)$
|
||||
regexFailureMessage: You must enter a valid ulimit name (cpu, fsize, data, stack, core, rss, nproc, nofile, memlock, as, locks, sigpending, msgqueue, nice, rtprio, rttime).
|
||||
- field: soft
|
||||
label: Soft Limit
|
||||
forcedType: int
|
||||
- field: hard
|
||||
label: Hard Limit
|
||||
forcedType: int
|
||||
so-elastic-fleet: *dockerOptions
|
||||
so-elasticsearch: *dockerOptions
|
||||
so-influxdb: *dockerOptions
|
||||
@@ -62,43 +100,6 @@ docker:
|
||||
so-idh: *dockerOptions
|
||||
so-elastic-agent: *dockerOptions
|
||||
so-telegraf: *dockerOptions
|
||||
so-steno: *dockerOptions
|
||||
so-suricata:
|
||||
final_octet:
|
||||
description: Last octet of the container IP address.
|
||||
helpLink: docker.html
|
||||
readonly: True
|
||||
advanced: True
|
||||
global: True
|
||||
port_bindings:
|
||||
description: List of port bindings for the container.
|
||||
helpLink: docker.html
|
||||
advanced: True
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
custom_bind_mounts:
|
||||
description: List of custom local volume bindings.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
extra_hosts:
|
||||
description: List of additional host entries for the container.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
extra_env:
|
||||
description: List of additional ENV entries for the container.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
ulimits:
|
||||
description: Ulimits for the container, in bytes.
|
||||
advanced: True
|
||||
helpLink: docker.html
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
so-suricata: *dockerOptions
|
||||
so-zeek: *dockerOptions
|
||||
so-kafka: *dockerOptions
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
|
||||
include:
|
||||
- elastalert.config
|
||||
@@ -24,7 +24,7 @@ so-elastalert:
|
||||
- user: so-elastalert
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastalert'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-elastalert'].ip }}
|
||||
- detach: True
|
||||
- binds:
|
||||
- /opt/so/rules/elastalert:/opt/elastalert/rules/:ro
|
||||
@@ -33,24 +33,30 @@ so-elastalert:
|
||||
- /opt/so/conf/elastalert/predefined/:/opt/elastalert/predefined/:ro
|
||||
- /opt/so/conf/elastalert/custom/:/opt/elastalert/custom/:ro
|
||||
- /opt/so/conf/elastalert/elastalert_config.yaml:/opt/elastalert/config.yaml:ro
|
||||
{% if DOCKER.containers['so-elastalert'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-elastalert'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-elastalert'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-elastalert'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
{% if DOCKER.containers['so-elastalert'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-elastalert'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-elastalert'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-elastalert'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-elastalert'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-elastalert'].extra_env %}
|
||||
- environment:
|
||||
{% for XTRAENV in DOCKER.containers['so-elastalert'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-elastalert'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-elastalert'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-elastalert'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- require:
|
||||
- cmd: wait_for_elasticsearch
|
||||
- file: elastarules
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
THIS IS A PLACEHOLDER FILE
|
||||
@@ -1,47 +1,48 @@
|
||||
elastalert:
|
||||
enabled:
|
||||
description: Enables or disables the ElastAlert 2 process. This process is critical for ensuring alerts arrive in SOC, and for outbound notification delivery.
|
||||
helpLink: elastalert.html
|
||||
forcedType: bool
|
||||
helpLink: elastalert
|
||||
alerter_parameters:
|
||||
title: Custom Configuration Parameters
|
||||
description: Optional configuration parameters made available as defaults for all rules and alerters. Use YAML format for these parameters, and reference the ElastAlert 2 documentation, located at https://elastalert2.readthedocs.io, for available configuration parameters. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
multiline: True
|
||||
syntax: yaml
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
forcedType: string
|
||||
jira_api_key:
|
||||
title: Jira API Key
|
||||
description: Optional configuration parameter for Jira API Key, used instead of the Jira username and password. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
sensitive: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
forcedType: string
|
||||
jira_pass:
|
||||
title: Jira Password
|
||||
description: Optional configuration parameter for Jira password. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
sensitive: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
forcedType: string
|
||||
jira_user:
|
||||
title: Jira Username
|
||||
description: Optional configuration parameter for Jira username. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
forcedType: string
|
||||
smtp_pass:
|
||||
title: SMTP Password
|
||||
description: Optional configuration parameter for SMTP password, required for authenticating email servers. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
sensitive: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
forcedType: string
|
||||
smtp_user:
|
||||
title: SMTP Username
|
||||
description: Optional configuration parameter for SMTP username, required for authenticating email servers. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
forcedType: string
|
||||
files:
|
||||
custom:
|
||||
@@ -49,91 +50,131 @@ elastalert:
|
||||
description: Optional custom Certificate Authority for connecting to an AlertManager server. To utilize this custom file, the alertmanager_ca_certs key must be set to /opt/elastalert/custom/alertmanager_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
gelf_ca__crt:
|
||||
description: Optional custom Certificate Authority for connecting to a Graylog server. To utilize this custom file, the graylog_ca_certs key must be set to /opt/elastalert/custom/graylog_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
http_post_ca__crt:
|
||||
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the legacy HTTP POST alerter. To utilize this custom file, the http_post_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
http_post2_ca__crt:
|
||||
description: Optional custom Certificate Authority for connecting to a generic HTTP server, via the newer HTTP POST 2 alerter. To utilize this custom file, the http_post2_ca_certs key must be set to /opt/elastalert/custom/http_post2_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
ms_teams_ca__crt:
|
||||
description: Optional custom Certificate Authority for connecting to Microsoft Teams server. To utilize this custom file, the ms_teams_ca_certs key must be set to /opt/elastalert/custom/ms_teams_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
pagerduty_ca__crt:
|
||||
description: Optional custom Certificate Authority for connecting to PagerDuty server. To utilize this custom file, the pagerduty_ca_certs key must be set to /opt/elastalert/custom/pagerduty_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
rocket_chat_ca__crt:
|
||||
description: Optional custom Certificate Authority for connecting to PagerDuty server. To utilize this custom file, the rocket_chart_ca_certs key must be set to /opt/elastalert/custom/rocket_chat_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
smtp__crt:
|
||||
description: Optional custom certificate for connecting to an SMTP server. To utilize this custom file, the smtp_cert_file key must be set to /opt/elastalert/custom/smtp.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
smtp__key:
|
||||
description: Optional custom certificate key for connecting to an SMTP server. To utilize this custom file, the smtp_key_file key must be set to /opt/elastalert/custom/smtp.key in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
slack_ca__crt:
|
||||
description: Optional custom Certificate Authority for connecting to Slack. To utilize this custom file, the slack_ca_certs key must be set to /opt/elastalert/custom/slack_ca.crt in the Alerter Parameters setting. Requires a valid Security Onion license key.
|
||||
global: True
|
||||
file: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
config:
|
||||
scan_subdirectories:
|
||||
description: Recursively scan subdirectories for rules.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastalert
|
||||
disable_rules_on_error:
|
||||
description: Disable rules on failure.
|
||||
forcedType: bool
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
run_every:
|
||||
minutes:
|
||||
description: Amount of time in minutes between searches.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
buffer_time:
|
||||
minutes:
|
||||
description: Amount of time in minutes to look through.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
old_query_limit:
|
||||
minutes:
|
||||
description: Amount of time in minutes between queries to start at the most recently run query.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
es_conn_timeout:
|
||||
description: Timeout in seconds for connecting to and reading from Elasticsearch.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
max_query_size:
|
||||
description: The maximum number of documents that will be returned from Elasticsearch in a single query.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
use_ssl:
|
||||
description: Use SSL to connect to Elasticsearch.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastalert
|
||||
verify_certs:
|
||||
description: Verify TLS certificates when connecting to Elasticsearch.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastalert
|
||||
alert_time_limit:
|
||||
days:
|
||||
description: The retry window for failed alerts.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
index_settings:
|
||||
shards:
|
||||
description: The number of shards for elastalert indices.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
replicas:
|
||||
description: The number of replicas for elastalert indices.
|
||||
global: True
|
||||
helpLink: elastalert.html
|
||||
helpLink: elastalert
|
||||
logging:
|
||||
incremental:
|
||||
description: When incremental is false (the default), the logging configuration is applied in full, replacing any existing logging setup. When true, only the level attributes of existing loggers and handlers are updated, leaving the rest of the logging configuration unchanged.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastalert
|
||||
disable_existing_loggers:
|
||||
description: Disable existing loggers.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastalert
|
||||
loggers:
|
||||
'':
|
||||
propagate:
|
||||
description: Propagate log messages to parent loggers.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastalert
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
|
||||
include:
|
||||
- elastic-fleet-package-registry.config
|
||||
@@ -21,30 +21,36 @@ so-elastic-fleet-package-registry:
|
||||
- user: 948
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet-package-registry'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-elastic-fleet-package-registry'].ip }}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
|
||||
{% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-elastic-fleet-package-registry'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-fleet-package-registry'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-elastic-fleet-package-registry'].port_bindings %}
|
||||
{% for BINDING in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
{% if DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
|
||||
- binds:
|
||||
{% for BIND in DOCKER.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-fleet-package-registry'].extra_env %}
|
||||
- environment:
|
||||
{% for XTRAENV in DOCKER.containers['so-elastic-fleet-package-registry'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-fleet-package-registry'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-elastic-fleet-package-registry'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
delete_so-elastic-fleet-package-registry_so-status.disabled:
|
||||
file.uncomment:
|
||||
- name: /opt/so/conf/so-status/so-status.conf
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
elastic_fleet_package_registry:
|
||||
enabled:
|
||||
description: Enables or disables the Fleet package registry process. This process must remain enabled to allow Elastic Agent packages to be updated.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
|
||||
include:
|
||||
- ca
|
||||
@@ -22,17 +22,17 @@ so-elastic-agent:
|
||||
- user: 949
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastic-agent'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-elastic-agent'].ip }}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
|
||||
{% if DOCKER.containers['so-elastic-agent'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-elastic-agent'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-agent'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-elastic-agent'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-elastic-agent'].port_bindings %}
|
||||
{% for BINDING in DOCKERMERGED.containers['so-elastic-agent'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
- binds:
|
||||
@@ -41,19 +41,25 @@ so-elastic-agent:
|
||||
- /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro
|
||||
- /nsm:/nsm:ro
|
||||
- /opt/so/log:/opt/so/log:ro
|
||||
{% if DOCKER.containers['so-elastic-agent'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-elastic-agent'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-agent'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-elastic-agent'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- environment:
|
||||
- FLEET_CA=/etc/pki/tls/certs/intca.crt
|
||||
- LOGS_PATH=logs
|
||||
{% if DOCKER.containers['so-elastic-agent'].extra_env %}
|
||||
{% for XTRAENV in DOCKER.containers['so-elastic-agent'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-agent'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-elastic-agent'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-agent'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-elastic-agent'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- require:
|
||||
- file: create-elastic-agent-config
|
||||
- file: trusttheca
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
elasticagent:
|
||||
enabled:
|
||||
description: Enables or disables the Elastic Agent process. This process must remain enabled to allow collection of node events.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %}
|
||||
|
||||
{# This value is generated during node install and stored in minion pillar #}
|
||||
@@ -94,17 +94,17 @@ so-elastic-fleet:
|
||||
- user: 947
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elastic-fleet'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-elastic-fleet'].ip }}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
- {{ GLOBALS.hostname }}:{{ GLOBALS.node_ip }}
|
||||
{% if DOCKER.containers['so-elastic-fleet'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-elastic-fleet'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-fleet'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-elastic-fleet'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-elastic-fleet'].port_bindings %}
|
||||
{% for BINDING in DOCKERMERGED.containers['so-elastic-fleet'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
- binds:
|
||||
@@ -112,8 +112,8 @@ so-elastic-fleet:
|
||||
- /etc/pki/elasticfleet-server.key:/etc/pki/elasticfleet-server.key:ro
|
||||
- /etc/pki/tls/certs/intca.crt:/etc/pki/tls/certs/intca.crt:ro
|
||||
- /opt/so/log/elasticfleet:/usr/share/elastic-agent/logs
|
||||
{% if DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-elastic-fleet'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-fleet'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-elastic-fleet'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
@@ -128,11 +128,17 @@ so-elastic-fleet:
|
||||
- FLEET_CA=/etc/pki/tls/certs/intca.crt
|
||||
- FLEET_SERVER_ELASTICSEARCH_CA=/etc/pki/tls/certs/intca.crt
|
||||
- LOGS_PATH=logs
|
||||
{% if DOCKER.containers['so-elastic-fleet'].extra_env %}
|
||||
{% for XTRAENV in DOCKER.containers['so-elastic-fleet'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-fleet'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-elastic-fleet'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-elastic-fleet'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-elastic-fleet'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- watch:
|
||||
- file: trusttheca
|
||||
- x509: etc_elasticfleet_key
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
"package": {
|
||||
"name": "endpoint",
|
||||
"title": "Elastic Defend",
|
||||
"version": "9.0.2",
|
||||
"version": "9.3.0",
|
||||
"requires_root": true
|
||||
},
|
||||
"enabled": true,
|
||||
|
||||
@@ -6,21 +6,23 @@
|
||||
"name": "agent-monitor",
|
||||
"namespace": "",
|
||||
"description": "",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"output_id": null,
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/agents/agent-monitor.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "agentmonitor",
|
||||
"pipeline": "elasticagent.monitor",
|
||||
"parsers": "",
|
||||
@@ -34,15 +36,16 @@
|
||||
"ignore_older": "72h",
|
||||
"clean_inactive": -1,
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": true,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": 64,
|
||||
"file_identity_native": false,
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"force": true
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "hydra-logs",
|
||||
"namespace": "so",
|
||||
"description": "Hydra logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/hydra/hydra.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "hydra",
|
||||
"pipeline": "hydra",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -34,10 +40,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "idh-logs",
|
||||
"namespace": "so",
|
||||
"description": "IDH integration",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/nsm/idh/opencanary.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "idh",
|
||||
"pipeline": "common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -31,10 +37,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,26 +4,32 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "import-evtx-logs",
|
||||
"namespace": "so",
|
||||
"description": "Import Windows EVTX logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/nsm/import/*/evtx/*.json"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "import",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
"exclude_files": [
|
||||
"\\.gz$"
|
||||
],
|
||||
"include_files": [],
|
||||
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.6.1\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.1.2\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.6.1\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.6.1\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.1.2\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
|
||||
"processors": "- dissect:\n tokenizer: \"/nsm/import/%{import.id}/evtx/%{import.file}\"\n field: \"log.file.path\"\n target_prefix: \"\"\n- decode_json_fields:\n fields: [\"message\"]\n target: \"\"\n- drop_fields:\n fields: [\"host\"]\n ignore_missing: true\n- add_fields:\n target: data_stream\n fields:\n type: logs\n dataset: system.security\n- add_fields:\n target: event\n fields:\n dataset: system.security\n module: system\n imported: true\n- add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.security-2.13.0\n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-Sysmon/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.sysmon_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.sysmon_operational\n module: windows\n imported: true\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.sysmon_operational-3.6.0\n- if:\n equals:\n winlog.channel: 'Application'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.application\n - add_fields:\n target: event\n fields:\n dataset: system.application\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.application-2.13.0\n- if:\n equals:\n winlog.channel: 'System'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: system.system\n - add_fields:\n target: event\n fields:\n dataset: system.system\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-system.system-2.13.0\n \n- if:\n equals:\n winlog.channel: 'Microsoft-Windows-PowerShell/Operational'\n then: \n - add_fields:\n target: data_stream\n fields:\n dataset: windows.powershell_operational\n - add_fields:\n target: event\n fields:\n dataset: windows.powershell_operational\n module: windows\n - add_fields:\n target: \"@metadata\"\n fields:\n pipeline: logs-windows.powershell_operational-3.6.0\n- add_fields:\n target: data_stream\n fields:\n dataset: import",
|
||||
"tags": [
|
||||
"import"
|
||||
],
|
||||
@@ -33,10 +39,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "import-suricata-logs",
|
||||
"namespace": "so",
|
||||
"description": "Import Suricata logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/nsm/import/*/suricata/eve*.json"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "import",
|
||||
"pipeline": "suricata.common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -32,10 +38,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,14 +4,18 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "rita-logs",
|
||||
"namespace": "so",
|
||||
"description": "RITA Logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
@@ -19,6 +23,8 @@
|
||||
"/nsm/rita/exploded-dns.csv",
|
||||
"/nsm/rita/long-connections.csv"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "rita",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
"exclude_files": [
|
||||
@@ -33,10 +39,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "so-ip-mappings",
|
||||
"namespace": "so",
|
||||
"description": "IP Description mappings",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/nsm/custom-mappings/ip-descriptions.csv"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "hostnamemappings",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
"exclude_files": [
|
||||
@@ -32,10 +38,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-auth-sync-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion - Elastic Auth Sync - Logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/soc/sync.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "soc",
|
||||
"pipeline": "common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -31,10 +37,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,20 +4,26 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-detections-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion Console - Detections Logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/soc/detections_runtime-status_sigma.log",
|
||||
"/opt/so/log/soc/detections_runtime-status_yara.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "soc",
|
||||
"pipeline": "common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -35,10 +41,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-salt-relay-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion - Salt Relay - Logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/soc/salt-relay.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "soc",
|
||||
"pipeline": "common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -33,10 +39,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-sensoroni-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion - Sensoroni - Logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/sensoroni/sensoroni.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "soc",
|
||||
"pipeline": "common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -31,10 +37,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "soc-server-logs",
|
||||
"namespace": "so",
|
||||
"description": "Security Onion Console Logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/opt/so/log/soc/sensoroni-server.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "soc",
|
||||
"pipeline": "common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -33,10 +39,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "strelka-logs",
|
||||
"namespace": "so",
|
||||
"description": "Strelka Logs",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/nsm/strelka/log/strelka.log"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "strelka",
|
||||
"pipeline": "strelka.file",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -31,10 +37,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,19 +4,25 @@
|
||||
"version": ""
|
||||
},
|
||||
"name": "suricata-logs",
|
||||
"namespace": "so",
|
||||
"description": "Suricata integration",
|
||||
"policy_id": "so-grid-nodes_general",
|
||||
"namespace": "so",
|
||||
"policy_ids": [
|
||||
"so-grid-nodes_general"
|
||||
],
|
||||
"vars": {},
|
||||
"inputs": {
|
||||
"filestream-filestream": {
|
||||
"enabled": true,
|
||||
"streams": {
|
||||
"filestream.generic": {
|
||||
"filestream.filestream": {
|
||||
"enabled": true,
|
||||
"vars": {
|
||||
"paths": [
|
||||
"/nsm/suricata/eve*.json"
|
||||
],
|
||||
"compression_gzip": false,
|
||||
"use_logs_stream": false,
|
||||
"data_stream.dataset": "suricata",
|
||||
"pipeline": "suricata.common",
|
||||
"parsers": "#- ndjson:\n# target: \"\"\n# message_key: msg\n#- multiline:\n# type: count\n# count_lines: 3\n",
|
||||
@@ -31,10 +37,10 @@
|
||||
"harvester_limit": 0,
|
||||
"fingerprint": false,
|
||||
"fingerprint_offset": 0,
|
||||
"fingerprint_length": "64",
|
||||
"file_identity_native": true,
|
||||
"exclude_lines": [],
|
||||
"include_lines": []
|
||||
"include_lines": [],
|
||||
"delete_enabled": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,14 +1,15 @@
|
||||
elasticfleet:
|
||||
enabled:
|
||||
description: Enables or disables the Elastic Fleet process. This process is critical for managing Elastic Agents.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
enable_manager_output:
|
||||
description: Setting this option to False should only be considered if there is at least one receiver node in the grid. If True, Elastic Agent will send events to the manager and receivers. If False, events will only be send to the receivers.
|
||||
advanced: True
|
||||
global: True
|
||||
forcedType: bool
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
files:
|
||||
soc:
|
||||
elastic-defend-disabled-filters__yaml:
|
||||
@@ -17,7 +18,7 @@ elasticfleet:
|
||||
syntax: yaml
|
||||
file: True
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
elastic-defend-custom-filters__yaml:
|
||||
title: Custom Elastic Defend filters
|
||||
@@ -25,31 +26,32 @@ elasticfleet:
|
||||
syntax: yaml
|
||||
file: True
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
logging:
|
||||
zeek:
|
||||
excluded:
|
||||
description: This is a list of Zeek logs that are excluded from being shipped through the data processing pipeline. If you remove a log from this list, Elastic Agent will attempt to process it. If an ingest node pipeline is not available to process the logs, you may experience errors.
|
||||
forcedType: "[]string"
|
||||
helpLink: zeek.html
|
||||
helpLink: zeek
|
||||
config:
|
||||
defend_filters:
|
||||
enable_auto_configuration:
|
||||
description: Enable auto-configuration and management of the Elastic Defend Exclusion filters.
|
||||
forcedType: bool
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
subscription_integrations:
|
||||
description: Enable the installation of integrations that require an Elastic license.
|
||||
global: True
|
||||
forcedType: bool
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
auto_upgrade_integrations:
|
||||
description: Enables or disables automatically upgrading Elastic Agent integrations.
|
||||
global: True
|
||||
forcedType: bool
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
outputs:
|
||||
logstash:
|
||||
bulk_max_size:
|
||||
@@ -57,67 +59,68 @@ elasticfleet:
|
||||
global: True
|
||||
forcedType: int
|
||||
advanced: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
worker:
|
||||
description: The number of workers per configured host publishing events.
|
||||
global: True
|
||||
forcedType: int
|
||||
advanced: true
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
queue_mem_events:
|
||||
title: queued events
|
||||
description: The number of events the queue can store. This value should be evenly divisible by the smaller of 'bulk_max_size' to avoid sending partial batches to the output.
|
||||
global: True
|
||||
forcedType: int
|
||||
advanced: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
timeout:
|
||||
description: The number of seconds to wait for responses from the Logstash server before timing out. Eg 30s
|
||||
regex: ^[0-9]+s$
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
loadbalance:
|
||||
description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
compression_level:
|
||||
description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression).
|
||||
regex: ^[1-9]$
|
||||
forcedType: int
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
server:
|
||||
custom_fqdn:
|
||||
description: Custom FQDN for Agents to connect to. One per line.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: "[]string"
|
||||
enable_auto_configuration:
|
||||
description: Enable auto-configuration of Logstash Outputs & Fleet Host URLs.
|
||||
forcedType: bool
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
endpoints_enrollment:
|
||||
description: Endpoint enrollment key.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
sensitive: True
|
||||
advanced: True
|
||||
es_token:
|
||||
description: Elastic auth token.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
sensitive: True
|
||||
advanced: True
|
||||
grid_enrollment:
|
||||
description: Grid enrollment key.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
sensitive: True
|
||||
advanced: True
|
||||
optional_integrations:
|
||||
@@ -125,57 +128,57 @@ elasticfleet:
|
||||
enabled_nodes:
|
||||
description: Fleet nodes with the Sublime Platform integration enabled. Enter one per line.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: "[]string"
|
||||
api_key:
|
||||
description: API key for Sublime Platform.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: string
|
||||
sensitive: True
|
||||
base_url:
|
||||
description: Base URL for Sublime Platform.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: string
|
||||
poll_interval:
|
||||
description: Poll interval for alerts from Sublime Platform.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: string
|
||||
limit:
|
||||
description: The maximum number of message groups to return from Sublime Platform.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: int
|
||||
kismet:
|
||||
base_url:
|
||||
description: Base URL for Kismet.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: string
|
||||
poll_interval:
|
||||
description: Poll interval for wireless device data from Kismet. Integration is currently configured to return devices seen as active by any Kismet sensor within the last 10 minutes.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: string
|
||||
api_key:
|
||||
description: API key for Kismet.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: string
|
||||
sensitive: True
|
||||
enabled_nodes:
|
||||
description: Fleet nodes with the Kismet integration enabled. Enter one per line.
|
||||
global: True
|
||||
helpLink: elastic-fleet.html
|
||||
helpLink: elastic-fleet
|
||||
advanced: True
|
||||
forcedType: "[]string"
|
||||
|
||||
@@ -6,8 +6,6 @@
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% import_yaml 'elasticsearch/defaults.yaml' as ELASTICSEARCHDEFAULTS with context %}
|
||||
|
||||
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
|
||||
|
||||
{# this is a list of dicts containing hostname:ip for elasticsearch nodes that need to know about each other for cluster #}
|
||||
{% set ELASTICSEARCH_SEED_HOSTS = [] %}
|
||||
{% set node_data = salt['pillar.get']('elasticsearch:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
|
||||
@@ -36,14 +34,8 @@
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% elif grains.id.split('_') | last == 'searchnode' %}
|
||||
{% if HIGHLANDER %}
|
||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %}
|
||||
{% endif %}
|
||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %}
|
||||
{% endif %}
|
||||
{% if HIGHLANDER %}
|
||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %}
|
||||
{% endif %}
|
||||
|
||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'name': GLOBALS.hostname}) %}
|
||||
{% do ELASTICSEARCHDEFAULTS.elasticsearch.config.cluster.update({'name': GLOBALS.hostname}) %}
|
||||
|
||||
@@ -10,7 +10,7 @@
|
||||
|
||||
vm.max_map_count:
|
||||
sysctl.present:
|
||||
- value: 262144
|
||||
- value: {{ ELASTICSEARCHMERGED.vm.max_map_count }}
|
||||
|
||||
# Add ES Group
|
||||
elasticsearchgroup:
|
||||
@@ -98,10 +98,6 @@ esrolesdir:
|
||||
- group: 939
|
||||
- makedirs: True
|
||||
|
||||
eslibdir:
|
||||
file.absent:
|
||||
- name: /opt/so/conf/elasticsearch/lib
|
||||
|
||||
esingestdynamicconf:
|
||||
file.recurse:
|
||||
- name: /opt/so/conf/elasticsearch/ingest
|
||||
@@ -119,11 +115,6 @@ esingestconf:
|
||||
- group: 939
|
||||
- show_changes: False
|
||||
|
||||
# Remove .fleet_final_pipeline-1 because we are using global@custom now
|
||||
so-fleet-final-pipeline-remove:
|
||||
file.absent:
|
||||
- name: /opt/so/conf/elasticsearch/ingest/.fleet_final_pipeline-1
|
||||
|
||||
# Auto-generate Elasticsearch ingest node pipelines from pillar
|
||||
{% for pipeline, config in ELASTICSEARCHMERGED.pipelines.items() %}
|
||||
es_ingest_conf_{{pipeline}}:
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
elasticsearch:
|
||||
enabled: false
|
||||
version: 9.0.8
|
||||
version: 9.3.2
|
||||
index_clean: true
|
||||
vm:
|
||||
max_map_count: 1048576
|
||||
config:
|
||||
action:
|
||||
destructive_requires_name: true
|
||||
@@ -117,7 +119,7 @@ elasticsearch:
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- so-case*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -129,8 +131,6 @@ elasticsearch:
|
||||
match_mapping_type: string
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-case-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 1500
|
||||
@@ -141,14 +141,7 @@ elasticsearch:
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
actions: {}
|
||||
min_age: 0ms
|
||||
so-common:
|
||||
close: 30
|
||||
delete: 365
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
@@ -212,7 +205,9 @@ elasticsearch:
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- winlog-mappings
|
||||
data_stream: {}
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-*-so*
|
||||
@@ -272,7 +267,7 @@ elasticsearch:
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- so-detection*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -284,8 +279,6 @@ elasticsearch:
|
||||
match_mapping_type: string
|
||||
settings:
|
||||
index:
|
||||
lifecycle:
|
||||
name: so-detection-logs
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 1500
|
||||
@@ -296,11 +289,6 @@ elasticsearch:
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
actions: {}
|
||||
min_age: 0ms
|
||||
sos-backup:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
@@ -460,7 +448,7 @@ elasticsearch:
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- endgame*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -508,8 +496,6 @@ elasticsearch:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
so-idh:
|
||||
close: 30
|
||||
delete: 365
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
@@ -564,10 +550,13 @@ elasticsearch:
|
||||
- dtc-user_agent-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- so-idh-*
|
||||
priority: 500
|
||||
- logs-idh-so*
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -677,11 +666,13 @@ elasticsearch:
|
||||
- common-dynamic-mappings
|
||||
- winlog-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-import-so*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -736,7 +727,7 @@ elasticsearch:
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- so-ip*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -751,19 +742,12 @@ elasticsearch:
|
||||
mapping:
|
||||
total_fields:
|
||||
limit: 1500
|
||||
lifecycle:
|
||||
name: so-ip-mappings-logs
|
||||
number_of_replicas: 0
|
||||
number_of_shards: 1
|
||||
refresh_interval: 30s
|
||||
sort:
|
||||
field: '@timestamp'
|
||||
order: desc
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
actions: {}
|
||||
min_age: 0ms
|
||||
so-items:
|
||||
index_sorting: false
|
||||
index_template:
|
||||
@@ -772,7 +756,7 @@ elasticsearch:
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- .items-default-**
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -851,8 +835,6 @@ elasticsearch:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
so-kratos:
|
||||
close: 30
|
||||
delete: 365
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
@@ -873,7 +855,7 @@ elasticsearch:
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-kratos-so*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -921,8 +903,6 @@ elasticsearch:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
so-hydra:
|
||||
close: 30
|
||||
delete: 365
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
@@ -983,7 +963,7 @@ elasticsearch:
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-hydra-so*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -1038,7 +1018,7 @@ elasticsearch:
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- .lists-default-**
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -1524,6 +1504,9 @@ elasticsearch:
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates:
|
||||
- logs-elastic_agent.cloudbeat@custom
|
||||
index_patterns:
|
||||
@@ -1759,6 +1742,9 @@ elasticsearch:
|
||||
- so-fleet_integrations.ip_mappings-1
|
||||
- so-fleet_globals-1
|
||||
- so-fleet_agent_id_verification-1
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates:
|
||||
- logs-elastic_agent.heartbeat@custom
|
||||
index_patterns:
|
||||
@@ -3018,8 +3004,6 @@ elasticsearch:
|
||||
priority: 50
|
||||
min_age: 30d
|
||||
so-logs-soc:
|
||||
close: 30
|
||||
delete: 365
|
||||
index_sorting: false
|
||||
index_template:
|
||||
composed_of:
|
||||
@@ -3074,11 +3058,13 @@ elasticsearch:
|
||||
- dtc-user_agent-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
data_stream: {}
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-soc-so*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -3668,10 +3654,13 @@ elasticsearch:
|
||||
- vulnerability-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-logstash-default*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -3969,10 +3958,13 @@ elasticsearch:
|
||||
- vulnerability-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-redis-default*
|
||||
priority: 500
|
||||
- logs-redis.log*
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -4083,11 +4075,13 @@ elasticsearch:
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-strelka-so*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -4197,11 +4191,13 @@ elasticsearch:
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-suricata-so*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -4311,11 +4307,13 @@ elasticsearch:
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-suricata.alerts-*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -4425,11 +4423,13 @@ elasticsearch:
|
||||
- vulnerability-mappings
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
data_stream: {}
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-syslog-so*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
@@ -4541,11 +4541,13 @@ elasticsearch:
|
||||
- common-settings
|
||||
- common-dynamic-mappings
|
||||
- hash-mappings
|
||||
data_stream: {}
|
||||
data_stream:
|
||||
allow_custom_routing: false
|
||||
hidden: false
|
||||
ignore_missing_component_templates: []
|
||||
index_patterns:
|
||||
- logs-zeek-so*
|
||||
priority: 500
|
||||
priority: 501
|
||||
template:
|
||||
mappings:
|
||||
date_detection: false
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_NODES %}
|
||||
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCH_SEED_HOSTS %}
|
||||
{% from 'elasticsearch/config.map.jinja' import ELASTICSEARCHMERGED %}
|
||||
@@ -28,15 +28,15 @@ so-elasticsearch:
|
||||
- user: elasticsearch
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-elasticsearch'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-elasticsearch'].ip }}
|
||||
- extra_hosts:
|
||||
{% for node in ELASTICSEARCH_NODES %}
|
||||
{% for hostname, ip in node.items() %}
|
||||
- {{hostname}}:{{ip}}
|
||||
{% endfor %}
|
||||
{% endfor %}
|
||||
{% if DOCKER.containers['so-elasticsearch'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-elasticsearch'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-elasticsearch'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-elasticsearch'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
@@ -45,17 +45,19 @@ so-elasticsearch:
|
||||
- discovery.type=single-node
|
||||
{% endif %}
|
||||
- ES_JAVA_OPTS=-Xms{{ GLOBALS.elasticsearch.es_heap }} -Xmx{{ GLOBALS.elasticsearch.es_heap }} -Des.transport.cname_in_publish_address=true -Dlog4j2.formatMsgNoLookups=true
|
||||
ulimits:
|
||||
- memlock=-1:-1
|
||||
- nofile=65536:65536
|
||||
- nproc=4096
|
||||
{% if DOCKER.containers['so-elasticsearch'].extra_env %}
|
||||
{% for XTRAENV in DOCKER.containers['so-elasticsearch'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-elasticsearch'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-elasticsearch'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-elasticsearch'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-elasticsearch'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-elasticsearch'].port_bindings %}
|
||||
{% for BINDING in DOCKERMERGED.containers['so-elasticsearch'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
- binds:
|
||||
@@ -75,8 +77,8 @@ so-elasticsearch:
|
||||
- {{ repo }}:{{ repo }}:rw
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-elasticsearch'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-elasticsearch'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-elasticsearch'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-elasticsearch'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
|
||||
0
salt/elasticsearch/files/ingest-dynamic/.gitkeep
Normal file
0
salt/elasticsearch/files/ingest-dynamic/.gitkeep
Normal file
@@ -1,5 +1,3 @@
|
||||
{%- set HIGHLANDER = salt['pillar.get']('global:highlander', False) -%}
|
||||
{%- raw -%}
|
||||
{
|
||||
"description" : "common",
|
||||
"processors" : [
|
||||
@@ -67,19 +65,7 @@
|
||||
{ "append": { "if": "ctx.dataset_tag_temp != null", "field": "tags", "value": "{{dataset_tag_temp.1}}" } },
|
||||
{ "grok": { "if": "ctx.http?.response?.status_code != null", "field": "http.response.status_code", "patterns": ["%{NUMBER:http.response.status_code:long} %{GREEDYDATA}"]} },
|
||||
{ "set": { "if": "ctx?.metadata?.kafka != null" , "field": "kafka.id", "value": "{{metadata.kafka.partition}}{{metadata.kafka.offset}}{{metadata.kafka.timestamp}}", "ignore_failure": true } },
|
||||
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } }
|
||||
{%- endraw %}
|
||||
{%- if HIGHLANDER %}
|
||||
,
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "ecs"
|
||||
}
|
||||
}
|
||||
{%- endif %}
|
||||
{%- raw %}
|
||||
,
|
||||
{ "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "dataset_tag_temp", "event.dataset_temp" ], "ignore_missing": true, "ignore_failure": true } },
|
||||
{ "pipeline": { "name": "global@custom", "ignore_missing_pipeline": true, "description": "[Fleet] Global pipeline for all data streams" } }
|
||||
]
|
||||
}
|
||||
{% endraw %}
|
||||
@@ -10,24 +10,28 @@
|
||||
"processors": [
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_ecs_version_f5923549",
|
||||
"field": "ecs.version",
|
||||
"value": "8.17.0"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_observer_vendor_ad9d35cc",
|
||||
"field": "observer.vendor",
|
||||
"value": "netgate"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_observer_type_5dddf3ba",
|
||||
"field": "observer.type",
|
||||
"value": "firewall"
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"tag": "rename_message_to_event_original_56a77271",
|
||||
"field": "message",
|
||||
"target_field": "event.original",
|
||||
"ignore_missing": true,
|
||||
@@ -36,12 +40,14 @@
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_event_kind_de80643c",
|
||||
"field": "event.kind",
|
||||
"value": "event"
|
||||
}
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_event_timezone_4ca44cac",
|
||||
"field": "event.timezone",
|
||||
"value": "{{{_tmp.tz_offset}}}",
|
||||
"if": "ctx._tmp?.tz_offset != null && ctx._tmp?.tz_offset != 'local'"
|
||||
@@ -49,6 +55,7 @@
|
||||
},
|
||||
{
|
||||
"grok": {
|
||||
"tag": "grok_event_original_27d9c8c7",
|
||||
"description": "Parse syslog header",
|
||||
"field": "event.original",
|
||||
"patterns": [
|
||||
@@ -72,6 +79,7 @@
|
||||
},
|
||||
{
|
||||
"date": {
|
||||
"tag": "date__tmp_timestamp8601_to_timestamp_6ac9d3ce",
|
||||
"if": "ctx._tmp.timestamp8601 != null",
|
||||
"field": "_tmp.timestamp8601",
|
||||
"target_field": "@timestamp",
|
||||
@@ -82,6 +90,7 @@
|
||||
},
|
||||
{
|
||||
"date": {
|
||||
"tag": "date__tmp_timestamp_to_timestamp_f21e536e",
|
||||
"if": "ctx.event?.timezone != null && ctx._tmp?.timestamp != null",
|
||||
"field": "_tmp.timestamp",
|
||||
"target_field": "@timestamp",
|
||||
@@ -95,6 +104,7 @@
|
||||
},
|
||||
{
|
||||
"grok": {
|
||||
"tag": "grok_process_name_cef3d489",
|
||||
"description": "Set Event Provider",
|
||||
"field": "process.name",
|
||||
"patterns": [
|
||||
@@ -107,71 +117,83 @@
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-firewall",
|
||||
"tag": "pipeline_e16851a7",
|
||||
"name": "logs-pfsense.log-1.25.1-firewall",
|
||||
"if": "ctx.event.provider == 'filterlog'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-openvpn",
|
||||
"tag": "pipeline_828590b5",
|
||||
"name": "logs-pfsense.log-1.25.1-openvpn",
|
||||
"if": "ctx.event.provider == 'openvpn'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-ipsec",
|
||||
"tag": "pipeline_9d37039c",
|
||||
"name": "logs-pfsense.log-1.25.1-ipsec",
|
||||
"if": "ctx.event.provider == 'charon'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-dhcp",
|
||||
"tag": "pipeline_ad56bbca",
|
||||
"name": "logs-pfsense.log-1.25.1-dhcp",
|
||||
"if": "[\"dhcpd\", \"dhclient\", \"dhcp6c\"].contains(ctx.event.provider)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-unbound",
|
||||
"tag": "pipeline_dd85553d",
|
||||
"name": "logs-pfsense.log-1.25.1-unbound",
|
||||
"if": "ctx.event.provider == 'unbound'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-haproxy",
|
||||
"tag": "pipeline_720ed255",
|
||||
"name": "logs-pfsense.log-1.25.1-haproxy",
|
||||
"if": "ctx.event.provider == 'haproxy'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-php-fpm",
|
||||
"tag": "pipeline_456beba5",
|
||||
"name": "logs-pfsense.log-1.25.1-php-fpm",
|
||||
"if": "ctx.event.provider == 'php-fpm'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-squid",
|
||||
"tag": "pipeline_a0d89375",
|
||||
"name": "logs-pfsense.log-1.25.1-squid",
|
||||
"if": "ctx.event.provider == 'squid'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-snort",
|
||||
"tag": "pipeline_c2f1ed55",
|
||||
"name": "logs-pfsense.log-1.25.1-snort",
|
||||
"if": "ctx.event.provider == 'snort'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "logs-pfsense.log-1.23.1-suricata",
|
||||
"tag":"pipeline_33db1c9e",
|
||||
"name": "logs-pfsense.log-1.25.1-suricata",
|
||||
"if": "ctx.event.provider == 'suricata'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"drop": {
|
||||
"tag": "drop_9d7c46f8",
|
||||
"if": "![\"filterlog\", \"openvpn\", \"charon\", \"dhcpd\", \"dhclient\", \"dhcp6c\", \"unbound\", \"haproxy\", \"php-fpm\", \"squid\", \"snort\", \"suricata\"].contains(ctx.event?.provider)"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"tag": "append_event_category_4780a983",
|
||||
"field": "event.category",
|
||||
"value": "network",
|
||||
"if": "ctx.network != null"
|
||||
@@ -179,6 +201,7 @@
|
||||
},
|
||||
{
|
||||
"convert": {
|
||||
"tag": "convert_source_address_to_source_ip_f5632a20",
|
||||
"field": "source.address",
|
||||
"target_field": "source.ip",
|
||||
"type": "ip",
|
||||
@@ -188,6 +211,7 @@
|
||||
},
|
||||
{
|
||||
"convert": {
|
||||
"tag": "convert_destination_address_to_destination_ip_f1388f0c",
|
||||
"field": "destination.address",
|
||||
"target_field": "destination.ip",
|
||||
"type": "ip",
|
||||
@@ -197,6 +221,7 @@
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_network_type_1f1d940a",
|
||||
"field": "network.type",
|
||||
"value": "ipv6",
|
||||
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\":\")"
|
||||
@@ -204,6 +229,7 @@
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_network_type_69deca38",
|
||||
"field": "network.type",
|
||||
"value": "ipv4",
|
||||
"if": "ctx.source?.ip != null && ctx.source.ip.contains(\".\")"
|
||||
@@ -211,6 +237,7 @@
|
||||
},
|
||||
{
|
||||
"geoip": {
|
||||
"tag": "geoip_source_ip_to_source_geo_da2e41b2",
|
||||
"field": "source.ip",
|
||||
"target_field": "source.geo",
|
||||
"ignore_missing": true
|
||||
@@ -218,6 +245,7 @@
|
||||
},
|
||||
{
|
||||
"geoip": {
|
||||
"tag": "geoip_destination_ip_to_destination_geo_ab5e2968",
|
||||
"field": "destination.ip",
|
||||
"target_field": "destination.geo",
|
||||
"ignore_missing": true
|
||||
@@ -225,6 +253,7 @@
|
||||
},
|
||||
{
|
||||
"geoip": {
|
||||
"tag": "geoip_source_ip_to_source_as_28d69883",
|
||||
"ignore_missing": true,
|
||||
"database_file": "GeoLite2-ASN.mmdb",
|
||||
"field": "source.ip",
|
||||
@@ -237,6 +266,7 @@
|
||||
},
|
||||
{
|
||||
"geoip": {
|
||||
"tag": "geoip_destination_ip_to_destination_as_8a007787",
|
||||
"database_file": "GeoLite2-ASN.mmdb",
|
||||
"field": "destination.ip",
|
||||
"target_field": "destination.as",
|
||||
@@ -249,6 +279,7 @@
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"tag": "rename_source_as_asn_to_source_as_number_a917047d",
|
||||
"field": "source.as.asn",
|
||||
"target_field": "source.as.number",
|
||||
"ignore_missing": true
|
||||
@@ -256,6 +287,7 @@
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"tag": "rename_source_as_organization_name_to_source_as_organization_name_f1362d0b",
|
||||
"field": "source.as.organization_name",
|
||||
"target_field": "source.as.organization.name",
|
||||
"ignore_missing": true
|
||||
@@ -263,6 +295,7 @@
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"tag": "rename_destination_as_asn_to_destination_as_number_3b459fcd",
|
||||
"field": "destination.as.asn",
|
||||
"target_field": "destination.as.number",
|
||||
"ignore_missing": true
|
||||
@@ -270,6 +303,7 @@
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"tag": "rename_destination_as_organization_name_to_destination_as_organization_name_814bd459",
|
||||
"field": "destination.as.organization_name",
|
||||
"target_field": "destination.as.organization.name",
|
||||
"ignore_missing": true
|
||||
@@ -277,12 +311,14 @@
|
||||
},
|
||||
{
|
||||
"community_id": {
|
||||
"tag": "community_id_d2308e7a",
|
||||
"target_field": "network.community_id",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"grok": {
|
||||
"tag": "grok_observer_ingress_interface_name_968018d3",
|
||||
"field": "observer.ingress.interface.name",
|
||||
"patterns": [
|
||||
"%{DATA}.%{NONNEGINT:observer.ingress.vlan.id}"
|
||||
@@ -293,6 +329,7 @@
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_network_vlan_id_efd4d96a",
|
||||
"field": "network.vlan.id",
|
||||
"copy_from": "observer.ingress.vlan.id",
|
||||
"ignore_empty_value": true
|
||||
@@ -300,6 +337,7 @@
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"tag": "append_related_ip_c1a6356b",
|
||||
"field": "related.ip",
|
||||
"value": "{{{destination.ip}}}",
|
||||
"allow_duplicates": false,
|
||||
@@ -308,6 +346,7 @@
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"tag": "append_related_ip_8121c591",
|
||||
"field": "related.ip",
|
||||
"value": "{{{source.ip}}}",
|
||||
"allow_duplicates": false,
|
||||
@@ -316,6 +355,7 @@
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"tag": "append_related_ip_53b62ed8",
|
||||
"field": "related.ip",
|
||||
"value": "{{{source.nat.ip}}}",
|
||||
"allow_duplicates": false,
|
||||
@@ -324,6 +364,7 @@
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"tag": "append_related_hosts_6f162628",
|
||||
"field": "related.hosts",
|
||||
"value": "{{{destination.domain}}}",
|
||||
"if": "ctx.destination?.domain != null"
|
||||
@@ -331,6 +372,7 @@
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"tag": "append_related_user_c036eec2",
|
||||
"field": "related.user",
|
||||
"value": "{{{user.name}}}",
|
||||
"if": "ctx.user?.name != null"
|
||||
@@ -338,6 +380,7 @@
|
||||
},
|
||||
{
|
||||
"set": {
|
||||
"tag": "set_network_direction_cb1e3125",
|
||||
"field": "network.direction",
|
||||
"value": "{{{network.direction}}}bound",
|
||||
"if": "ctx.network?.direction != null && ctx.network?.direction =~ /^(in|out)$/"
|
||||
@@ -345,6 +388,7 @@
|
||||
},
|
||||
{
|
||||
"remove": {
|
||||
"tag": "remove_a82e20f2",
|
||||
"field": [
|
||||
"_tmp"
|
||||
],
|
||||
@@ -353,11 +397,21 @@
|
||||
},
|
||||
{
|
||||
"script": {
|
||||
"tag": "script_a7f2c062",
|
||||
"lang": "painless",
|
||||
"description": "This script processor iterates over the whole document to remove fields with null values.",
|
||||
"source": "void handleMap(Map map) {\n for (def x : map.values()) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n map.values().removeIf(v -> v == null || (v instanceof String && v == \"-\"));\n}\nvoid handleList(List list) {\n for (def x : list) {\n if (x instanceof Map) {\n handleMap(x);\n } else if (x instanceof List) {\n handleList(x);\n }\n }\n}\nhandleMap(ctx);\n"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"tag": "append_preserve_original_event_on_error",
|
||||
"field": "tags",
|
||||
"value": "preserve_original_event",
|
||||
"allow_duplicates": false,
|
||||
"if": "ctx.error?.message != null"
|
||||
}
|
||||
},
|
||||
{
|
||||
"pipeline": {
|
||||
"name": "global@custom",
|
||||
@@ -405,7 +459,14 @@
|
||||
{
|
||||
"append": {
|
||||
"field": "error.message",
|
||||
"value": "{{{ _ingest.on_failure_message }}}"
|
||||
"value": "Processor '{{{ _ingest.on_failure_processor_type }}}' {{#_ingest.on_failure_processor_tag}}with tag '{{{ _ingest.on_failure_processor_tag }}}' {{/_ingest.on_failure_processor_tag}}in pipeline '{{{ _ingest.pipeline }}}' failed with message '{{{ _ingest.on_failure_message }}}'"
|
||||
}
|
||||
},
|
||||
{
|
||||
"append": {
|
||||
"field": "tags",
|
||||
"value": "preserve_original_event",
|
||||
"allow_duplicates": false
|
||||
}
|
||||
}
|
||||
]
|
||||
@@ -22,6 +22,12 @@
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"lowercase": {
|
||||
"field": "network.transport",
|
||||
"ignore_failure": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"rename": {
|
||||
"field": "message2.in_iface",
|
||||
|
||||
18
salt/elasticsearch/files/ingest/zeek.websocket
Normal file
18
salt/elasticsearch/files/ingest/zeek.websocket
Normal file
@@ -0,0 +1,18 @@
|
||||
{
|
||||
"description" : "zeek.websocket",
|
||||
"processors" : [
|
||||
{ "set": { "field": "event.dataset", "value": "websocket" } },
|
||||
{ "remove": { "field": ["host"], "ignore_failure": true } },
|
||||
{ "json": { "field": "message", "target_field": "message2", "ignore_failure": true } },
|
||||
{ "rename": { "field": "message2.host", "target_field": "websocket.host", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.uri", "target_field": "websocket.uri", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.user_agent", "target_field": "websocket.user_agent", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.subprotocol", "target_field": "websocket.subprotocol", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.client_protocols", "target_field": "websocket.client_protocols", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.client_extensions", "target_field": "websocket.client_extensions", "ignore_missing": true } },
|
||||
{ "rename": { "field": "message2.server_extensions", "target_field": "websocket.server_extensions", "ignore_missing": true } },
|
||||
{ "remove": { "field": "message2.tags", "ignore_failure": true } },
|
||||
{ "set": { "field": "network.transport", "value": "tcp" } },
|
||||
{ "pipeline": { "name": "zeek.common" } }
|
||||
]
|
||||
}
|
||||
@@ -1,8 +1,9 @@
|
||||
elasticsearch:
|
||||
enabled:
|
||||
description: Enables or disables the Elasticsearch process. This process provides the log event storage system. WARNING - Disabling this process is unsupported.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
version:
|
||||
description: "This specifies the version of the following containers: so-elastic-fleet-package-registry, so-elastic-agent, so-elastic-fleet, so-kibana, so-logstash and so-elasticsearch. Modifying this value in the Elasticsearch defaults.yaml will result in catastrophic grid failure."
|
||||
readonly: True
|
||||
@@ -10,15 +11,20 @@ elasticsearch:
|
||||
advanced: True
|
||||
esheap:
|
||||
description: Specify the memory heap size in (m)egabytes for Elasticsearch.
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
index_clean:
|
||||
description: Determines if indices should be considered for deletion by available disk space in the cluster. Otherwise, indices will only be deleted by the age defined in the ILM settings. This setting only applies to EVAL, STANDALONE, and HEAVY NODE installations. Other installations can only use ILM settings.
|
||||
forcedType: bool
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
vm:
|
||||
max_map_count:
|
||||
description: The maximum number of memory map areas a process may use. Elasticsearch uses a mmapfs directory by default to store its indices. The default operating system limits on mmap counts could be too low, which may result in out of memory exceptions.
|
||||
forcedType: int
|
||||
helpLink: elasticsearch
|
||||
retention:
|
||||
retention_pct:
|
||||
decription: Total percentage of space used by Elasticsearch for multi node clusters
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
global: True
|
||||
config:
|
||||
cluster:
|
||||
@@ -26,55 +32,102 @@ elasticsearch:
|
||||
description: The name of the Security Onion Elasticsearch cluster, for identification purposes.
|
||||
readonly: True
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
logsdb:
|
||||
enabled:
|
||||
description: Enables or disables the Elasticsearch logsdb index mode. When enabled, most logs-* datastreams will convert to logsdb from standard after rolling over.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
routing:
|
||||
allocation:
|
||||
disk:
|
||||
threshold_enabled:
|
||||
threshold_enabled:
|
||||
description: Specifies whether the Elasticsearch node will monitor the available disk space for low disk space conditions and take action to protect the cluster.
|
||||
helpLink: elasticsearch.html
|
||||
forcedType: bool
|
||||
helpLink: elasticsearch
|
||||
watermark:
|
||||
low:
|
||||
description: The lower percentage of used disk space representing a healthy node.
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
high:
|
||||
description: The higher percentage of used disk space representing an unhealthy node.
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
flood_stage:
|
||||
description: The max percentage of used disk space that will cause the node to take protective actions, such as blocking incoming events.
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
action:
|
||||
destructive_requires_name:
|
||||
description: Requires explicit index names when deleting indices. Prevents accidental deletion of indices via wildcard patterns.
|
||||
advanced: True
|
||||
forcedType: bool
|
||||
helpLink: elasticsearch
|
||||
script:
|
||||
max_compilations_rate:
|
||||
max_compilations_rate:
|
||||
description: Max rate of script compilations permitted in the Elasticsearch cluster. Larger values will consume more resources.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
indices:
|
||||
id_field_data:
|
||||
enabled:
|
||||
description: Enables or disables loading of field data on the _id field.
|
||||
advanced: True
|
||||
forcedType: bool
|
||||
helpLink: elasticsearch
|
||||
query:
|
||||
bool:
|
||||
max_clause_count:
|
||||
max_clause_count:
|
||||
description: Max number of boolean clauses per query.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
xpack:
|
||||
ml:
|
||||
enabled:
|
||||
description: Enables or disables machine learning on the node.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
helpLink: elasticsearch
|
||||
security:
|
||||
enabled:
|
||||
description: Enables or disables Elasticsearch security features.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
helpLink: elasticsearch
|
||||
authc:
|
||||
anonymous:
|
||||
authz_exception:
|
||||
description: Controls whether an authorization exception is thrown when anonymous user does not have the required privileges.
|
||||
advanced: True
|
||||
forcedType: bool
|
||||
helpLink: elasticsearch
|
||||
http:
|
||||
ssl:
|
||||
enabled:
|
||||
description: Enables or disables TLS/SSL for the HTTP layer.
|
||||
advanced: True
|
||||
forcedType: bool
|
||||
helpLink: elasticsearch
|
||||
transport:
|
||||
ssl:
|
||||
enabled:
|
||||
description: Enables or disables TLS/SSL for the transport layer.
|
||||
advanced: True
|
||||
forcedType: bool
|
||||
helpLink: elasticsearch
|
||||
pipelines:
|
||||
custom001: &pipelines
|
||||
description:
|
||||
description: Description of the ingest node pipeline
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
processors:
|
||||
description: Processors for the ingest node pipeline
|
||||
global: True
|
||||
advanced: True
|
||||
multiline: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
custom002: *pipelines
|
||||
custom003: *pipelines
|
||||
custom004: *pipelines
|
||||
@@ -94,24 +147,24 @@ elasticsearch:
|
||||
description: Number of replicas required for all indices. Multiple replicas protects against data loss, but also increases storage costs. This setting will be applied to all indices.
|
||||
forcedType: int
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
refresh_interval:
|
||||
description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
number_of_shards:
|
||||
description: Number of shards required for this index. Using multiple shards increases fault tolerance, but also increases storage and network costs.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
sort:
|
||||
field:
|
||||
description: The field to sort by. Must set index_sorting to True.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
order:
|
||||
description: The order to sort by. Must set index_sorting to True.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
@@ -121,16 +174,16 @@ elasticsearch:
|
||||
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
|
||||
forcedType: int
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
rollover:
|
||||
max_age:
|
||||
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
max_primary_shard_size:
|
||||
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
shrink:
|
||||
method:
|
||||
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
|
||||
@@ -178,13 +231,13 @@ elasticsearch:
|
||||
regex: ^[0-9]{1,5}d$
|
||||
forcedType: string
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
actions:
|
||||
set_priority:
|
||||
priority:
|
||||
description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
allocate:
|
||||
number_of_replicas:
|
||||
description: Set the number of replicas. Remains the same as the previous phase by default.
|
||||
@@ -197,14 +250,14 @@ elasticsearch:
|
||||
regex: ^[0-9]{1,5}d$
|
||||
forcedType: string
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
actions:
|
||||
set_priority:
|
||||
priority:
|
||||
description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities.
|
||||
forcedType: int
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
shrink:
|
||||
method:
|
||||
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
|
||||
@@ -257,13 +310,14 @@ elasticsearch:
|
||||
regex: ^[0-9]{1,5}d$
|
||||
forcedType: string
|
||||
global: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
so-logs: &indexSettings
|
||||
index_sorting:
|
||||
index_sorting:
|
||||
description: Sorts the index by event time, at the cost of additional processing resource consumption.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
index_template:
|
||||
index_patterns:
|
||||
description: Patterns for matching multiple indices or tables.
|
||||
@@ -271,7 +325,7 @@ elasticsearch:
|
||||
multiline: True
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
template:
|
||||
settings:
|
||||
index:
|
||||
@@ -280,35 +334,35 @@ elasticsearch:
|
||||
forcedType: int
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
mapping:
|
||||
total_fields:
|
||||
limit:
|
||||
description: Max number of fields that can exist on a single index. Larger values will consume more resources.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
refresh_interval:
|
||||
description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
number_of_shards:
|
||||
description: Number of shards required for this index. Using multiple shards increases fault tolerance, but also increases storage and network costs.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
sort:
|
||||
field:
|
||||
description: The field to sort by. Must set index_sorting to True.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
order:
|
||||
description: The order to sort by. Must set index_sorting to True.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
mappings:
|
||||
_meta:
|
||||
package:
|
||||
@@ -316,43 +370,43 @@ elasticsearch:
|
||||
description: Meta settings for the mapping.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
managed_by:
|
||||
description: Meta settings for the mapping.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
managed:
|
||||
description: Meta settings for the mapping.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
composed_of:
|
||||
description: The index template is composed of these component templates.
|
||||
forcedType: "[]string"
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
priority:
|
||||
description: The priority of the index template.
|
||||
forcedType: int
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
data_stream:
|
||||
hidden:
|
||||
description: Hide the data stream.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
allow_custom_routing:
|
||||
description: Allow custom routing for the data stream.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
policy:
|
||||
phases:
|
||||
hot:
|
||||
@@ -360,7 +414,7 @@ elasticsearch:
|
||||
description: Minimum age of index. This determines when the index should be moved to the hot tier.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
actions:
|
||||
set_priority:
|
||||
priority:
|
||||
@@ -368,18 +422,18 @@ elasticsearch:
|
||||
forcedType: int
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
rollover:
|
||||
max_age:
|
||||
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
max_primary_shard_size:
|
||||
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
shrink:
|
||||
method:
|
||||
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
|
||||
@@ -428,7 +482,7 @@ elasticsearch:
|
||||
forcedType: string
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
actions:
|
||||
set_priority:
|
||||
priority:
|
||||
@@ -436,18 +490,18 @@ elasticsearch:
|
||||
forcedType: int
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
rollover:
|
||||
max_age:
|
||||
description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
max_primary_shard_size:
|
||||
description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
shrink:
|
||||
method:
|
||||
description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size.
|
||||
@@ -501,7 +555,7 @@ elasticsearch:
|
||||
forcedType: string
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
actions:
|
||||
set_priority:
|
||||
priority:
|
||||
@@ -509,7 +563,7 @@ elasticsearch:
|
||||
forcedType: int
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
allocate:
|
||||
number_of_replicas:
|
||||
description: Set the number of replicas. Remains the same as the previous phase by default.
|
||||
@@ -523,25 +577,25 @@ elasticsearch:
|
||||
forcedType: string
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
_meta:
|
||||
package:
|
||||
name:
|
||||
description: Meta settings for the mapping.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
managed_by:
|
||||
description: Meta settings for the mapping.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
managed:
|
||||
description: Meta settings for the mapping.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
so-logs-system_x_auth: *indexSettings
|
||||
so-logs-system_x_syslog: *indexSettings
|
||||
so-logs-system_x_system: *indexSettings
|
||||
@@ -604,20 +658,21 @@ elasticsearch:
|
||||
so-metrics-fleet_server_x_agent_status: &fleetMetricsSettings
|
||||
index_sorting:
|
||||
description: Sorts the index by event time, at the cost of additional processing resource consumption.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
index_template:
|
||||
ignore_missing_component_templates:
|
||||
description: Ignore component templates if they aren't in Elasticsearch.
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
index_patterns:
|
||||
description: Patterns for matching multiple indices or tables.
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
template:
|
||||
settings:
|
||||
index:
|
||||
@@ -625,33 +680,35 @@ elasticsearch:
|
||||
description: Type of mode used for this index. Time series indices can be used for metrics to reduce necessary storage.
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
number_of_replicas:
|
||||
description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs.
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
composed_of:
|
||||
description: The index template is composed of these component templates.
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
priority:
|
||||
description: The priority of the index template.
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
data_stream:
|
||||
hidden:
|
||||
description: Hide the data stream.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
allow_custom_routing:
|
||||
description: Allow custom routing for the data stream.
|
||||
forcedType: bool
|
||||
advanced: True
|
||||
readonly: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
so-metrics-fleet_server_x_agent_versions: *fleetMetricsSettings
|
||||
so_roles:
|
||||
so-manager: &soroleSettings
|
||||
@@ -662,7 +719,7 @@ elasticsearch:
|
||||
forcedType: "[]string"
|
||||
global: False
|
||||
advanced: True
|
||||
helpLink: elasticsearch.html
|
||||
helpLink: elasticsearch
|
||||
so-managersearch: *soroleSettings
|
||||
so-standalone: *soroleSettings
|
||||
so-searchnode: *soroleSettings
|
||||
|
||||
@@ -27,14 +27,12 @@ iptables_config:
|
||||
- source: salt://firewall/iptables.jinja
|
||||
- template: jinja
|
||||
|
||||
{% if grains.os_family == 'RedHat' %}
|
||||
disable_firewalld:
|
||||
service.dead:
|
||||
- name: firewalld
|
||||
- enable: False
|
||||
- require:
|
||||
- file: iptables_config
|
||||
{% endif %}
|
||||
|
||||
iptables_restore:
|
||||
cmd.run:
|
||||
@@ -44,7 +42,6 @@ iptables_restore:
|
||||
- onlyif:
|
||||
- iptables-restore --test {{ iptmap.configfile }}
|
||||
|
||||
{% if grains.os_family == 'RedHat' %}
|
||||
enable_firewalld:
|
||||
service.running:
|
||||
- name: firewalld
|
||||
@@ -52,7 +49,6 @@ enable_firewalld:
|
||||
- onfail:
|
||||
- file: iptables_config
|
||||
- cmd: iptables_restore
|
||||
{% endif %}
|
||||
|
||||
{% else %}
|
||||
|
||||
|
||||
@@ -1,14 +1,6 @@
|
||||
{% set iptmap = salt['grains.filter_by']({
|
||||
'Debian': {
|
||||
'service': 'netfilter-persistent',
|
||||
'iptpkg': 'iptables',
|
||||
'persistpkg': 'iptables-persistent',
|
||||
'configfile': '/etc/iptables/rules.v4'
|
||||
},
|
||||
'RedHat': {
|
||||
'service': 'iptables',
|
||||
'iptpkg': 'iptables-nft',
|
||||
'persistpkg': 'iptables-nft-services',
|
||||
'configfile': '/etc/sysconfig/iptables'
|
||||
},
|
||||
}) %}
|
||||
{% set iptmap = {
|
||||
'service': 'iptables',
|
||||
'iptpkg': 'iptables-nft',
|
||||
'persistpkg': 'iptables-nft-services',
|
||||
'configfile': '/etc/sysconfig/iptables'
|
||||
} %}
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
{%- from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{%- from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{%- from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{%- from 'firewall/map.jinja' import FIREWALL_MERGED %}
|
||||
{%- set role = GLOBALS.role.split('-')[1] %}
|
||||
{%- from 'firewall/containers.map.jinja' import NODE_CONTAINERS %}
|
||||
@@ -8,9 +8,9 @@
|
||||
{%- set D1 = [] %}
|
||||
{%- set D2 = [] %}
|
||||
{%- for container in NODE_CONTAINERS %}
|
||||
{%- set IP = DOCKER.containers[container].ip %}
|
||||
{%- if DOCKER.containers[container].port_bindings is defined %}
|
||||
{%- for binding in DOCKER.containers[container].port_bindings %}
|
||||
{%- set IP = DOCKERMERGED.containers[container].ip %}
|
||||
{%- if DOCKERMERGED.containers[container].port_bindings is defined %}
|
||||
{%- for binding in DOCKERMERGED.containers[container].port_bindings %}
|
||||
{#- cant split int so we convert to string #}
|
||||
{%- set binding = binding|string %}
|
||||
{#- split the port binding by /. if proto not specified, default is tcp #}
|
||||
@@ -33,13 +33,13 @@
|
||||
{%- set hostPort = bsa[0] %}
|
||||
{%- set containerPort = bsa[1] %}
|
||||
{%- endif %}
|
||||
{%- do PR.append("-A POSTROUTING -s " ~ DOCKER.containers[container].ip ~ "/32 -d " ~ DOCKER.containers[container].ip ~ "/32 -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ containerPort ~ " -j MASQUERADE") %}
|
||||
{%- do PR.append("-A POSTROUTING -s " ~ DOCKERMERGED.containers[container].ip ~ "/32 -d " ~ DOCKERMERGED.containers[container].ip ~ "/32 -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ containerPort ~ " -j MASQUERADE") %}
|
||||
{%- if bindip | length and bindip != '0.0.0.0' %}
|
||||
{%- do D1.append("-A DOCKER -d " ~ bindip ~ "/32 ! -i sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ hostPort ~ " -j DNAT --to-destination " ~ DOCKER.containers[container].ip ~ ":" ~ containerPort) %}
|
||||
{%- do D1.append("-A DOCKER -d " ~ bindip ~ "/32 ! -i sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ hostPort ~ " -j DNAT --to-destination " ~ DOCKERMERGED.containers[container].ip ~ ":" ~ containerPort) %}
|
||||
{%- else %}
|
||||
{%- do D1.append("-A DOCKER ! -i sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ hostPort ~ " -j DNAT --to-destination " ~ DOCKER.containers[container].ip ~ ":" ~ containerPort) %}
|
||||
{%- do D1.append("-A DOCKER ! -i sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ hostPort ~ " -j DNAT --to-destination " ~ DOCKERMERGED.containers[container].ip ~ ":" ~ containerPort) %}
|
||||
{%- endif %}
|
||||
{%- do D2.append("-A DOCKER -d " ~ DOCKER.containers[container].ip ~ "/32 ! -i sobridge -o sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ containerPort ~ " -j ACCEPT") %}
|
||||
{%- do D2.append("-A DOCKER -d " ~ DOCKERMERGED.containers[container].ip ~ "/32 ! -i sobridge -o sobridge -p " ~ proto ~ " -m " ~ proto ~ " --dport " ~ containerPort ~ " -j ACCEPT") %}
|
||||
{%- endfor %}
|
||||
{%- endif %}
|
||||
{%- endfor %}
|
||||
@@ -52,7 +52,7 @@
|
||||
:DOCKER - [0:0]
|
||||
-A PREROUTING -m addrtype --dst-type LOCAL -j DOCKER
|
||||
-A OUTPUT ! -d 127.0.0.0/8 -m addrtype --dst-type LOCAL -j DOCKER
|
||||
-A POSTROUTING -s {{DOCKER.range}} ! -o sobridge -j MASQUERADE
|
||||
-A POSTROUTING -s {{DOCKERMERGED.range}} ! -o sobridge -j MASQUERADE
|
||||
{%- for rule in PR %}
|
||||
{{ rule }}
|
||||
{%- endfor %}
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{% import_yaml 'firewall/defaults.yaml' as FIREWALL_DEFAULT %}
|
||||
|
||||
{# add our ip to self #}
|
||||
{% do FIREWALL_DEFAULT.firewall.hostgroups.self.append(GLOBALS.node_ip) %}
|
||||
{# add dockernet range #}
|
||||
{% do FIREWALL_DEFAULT.firewall.hostgroups.dockernet.append(DOCKER.range) %}
|
||||
{% do FIREWALL_DEFAULT.firewall.hostgroups.dockernet.append(DOCKERMERGED.range) %}
|
||||
|
||||
{% if GLOBALS.role == 'so-idh' %}
|
||||
{% from 'idh/opencanary_config.map.jinja' import IDH_PORTGROUPS %}
|
||||
|
||||
@@ -3,7 +3,7 @@ firewall:
|
||||
analyst: &hostgroupsettings
|
||||
description: List of IP or CIDR blocks to allow access to this hostgroup.
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
helpLink: firewall
|
||||
multiline: True
|
||||
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
|
||||
regexFailureMessage: You must enter a valid IP address or CIDR.
|
||||
@@ -11,7 +11,7 @@ firewall:
|
||||
anywhere: &hostgroupsettingsadv
|
||||
description: List of IP or CIDR blocks to allow access to this hostgroup.
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
helpLink: firewall
|
||||
multiline: True
|
||||
advanced: True
|
||||
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
|
||||
@@ -22,7 +22,7 @@ firewall:
|
||||
dockernet: &ROhostgroupsettingsadv
|
||||
description: List of IP or CIDR blocks to allow access to this hostgroup.
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
helpLink: firewall
|
||||
multiline: True
|
||||
advanced: True
|
||||
readonly: True
|
||||
@@ -53,7 +53,7 @@ firewall:
|
||||
customhostgroup0: &customhostgroupsettings
|
||||
description: List of IP or CIDR blocks to allow to this hostgroup.
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
helpLink: firewall
|
||||
advanced: True
|
||||
multiline: True
|
||||
regex: ^(([0-9]{1,3}\.){3}[0-9]{1,3}(\/([0-9]|[1-2][0-9]|3[0-2]))?)?$
|
||||
@@ -73,14 +73,14 @@ firewall:
|
||||
tcp: &tcpsettings
|
||||
description: List of TCP ports for this port group.
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
helpLink: firewall
|
||||
advanced: True
|
||||
multiline: True
|
||||
duplicates: True
|
||||
udp: &udpsettings
|
||||
description: List of UDP ports for this port group.
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
helpLink: firewall
|
||||
advanced: True
|
||||
multiline: True
|
||||
duplicates: True
|
||||
@@ -206,7 +206,7 @@ firewall:
|
||||
advanced: True
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
helpLink: firewall
|
||||
duplicates: True
|
||||
sensor:
|
||||
portgroups: *portgroupsdocker
|
||||
@@ -262,7 +262,7 @@ firewall:
|
||||
advanced: True
|
||||
multiline: True
|
||||
forcedType: "[]string"
|
||||
helpLink: firewall.html
|
||||
helpLink: firewall
|
||||
duplicates: True
|
||||
dockernet:
|
||||
portgroups: *portgroupshost
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
global:
|
||||
pcapengine: STENO
|
||||
pcapengine: SURICATA
|
||||
pipeline: REDIS
|
||||
@@ -18,13 +18,11 @@ global:
|
||||
regexFailureMessage: You must enter either ZEEK or SURICATA.
|
||||
global: True
|
||||
pcapengine:
|
||||
description: Which engine to use for generating pcap. Options are STENO, SURICATA or TRANSITION.
|
||||
regex: ^(STENO|SURICATA|TRANSITION)$
|
||||
description: Which engine to use for generating pcap. Currently only SURICATA is supported.
|
||||
regex: ^(SURICATA)$
|
||||
options:
|
||||
- STENO
|
||||
- SURICATA
|
||||
- TRANSITION
|
||||
regexFailureMessage: You must enter either STENO, SURICATA or TRANSITION.
|
||||
regexFailureMessage: You must enter either SURICATA.
|
||||
global: True
|
||||
ids:
|
||||
description: Which IDS engine to use. Currently only Suricata is supported.
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
host:
|
||||
mainint:
|
||||
description: Main interface of the grid host.
|
||||
helpLink: host.html
|
||||
helpLink: ip-address
|
||||
mainip:
|
||||
description: Main IP address of the grid host.
|
||||
helpLink: host.html
|
||||
helpLink: ip-address
|
||||
|
||||
@@ -11,7 +11,7 @@
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% if 'api' in salt['pillar.get']('features', []) %}
|
||||
|
||||
@@ -26,32 +26,38 @@ so-hydra:
|
||||
- name: so-hydra
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-hydra'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-hydra'].ip }}
|
||||
- binds:
|
||||
- /opt/so/conf/hydra/:/hydra-conf:ro
|
||||
- /opt/so/log/hydra/:/hydra-log:rw
|
||||
- /nsm/hydra/db:/hydra-data:rw
|
||||
{% if DOCKER.containers['so-hydra'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-hydra'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-hydra'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-hydra'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-hydra'].port_bindings %}
|
||||
{% for BINDING in DOCKERMERGED.containers['so-hydra'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
{% if DOCKER.containers['so-hydra'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-hydra'].extra_hosts %}
|
||||
- extra_hosts:
|
||||
{% for XTRAHOST in DOCKER.containers['so-hydra'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-hydra'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-hydra'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-hydra'].extra_env %}
|
||||
- environment:
|
||||
{% for XTRAENV in DOCKER.containers['so-hydra'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-hydra'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-hydra'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-hydra'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- restart_policy: unless-stopped
|
||||
- watch:
|
||||
- file: hydraconfig
|
||||
@@ -67,7 +73,7 @@ delete_so-hydra_so-status.disabled:
|
||||
|
||||
wait_for_hydra:
|
||||
http.wait_for_successful_query:
|
||||
- name: 'http://{{ GLOBALS.manager }}:4444/'
|
||||
- name: 'http://{{ GLOBALS.manager }}:4444/health/alive'
|
||||
- ssl: True
|
||||
- verify_ssl: False
|
||||
- status:
|
||||
|
||||
@@ -1,7 +1,8 @@
|
||||
hydra:
|
||||
enabled:
|
||||
description: Enables or disables the API authentication system, used for service account authentication. Enabling this feature requires a valid Security Onion license key. Defaults to False.
|
||||
helpLink: connect.html
|
||||
description: Enables or disables the API authentication system, used for service account authentication. Enabling this feature requires a valid Security Onion license key. Defaults to False.
|
||||
forcedType: bool
|
||||
helpLink: connect-api
|
||||
global: True
|
||||
config:
|
||||
ttl:
|
||||
@@ -9,16 +10,16 @@ hydra:
|
||||
description: Amount of time that the generated access token will be valid. Specified in the form of 2h, which means 2 hours.
|
||||
global: True
|
||||
forcedType: string
|
||||
helpLink: connect.html
|
||||
helpLink: connect-api
|
||||
log:
|
||||
level:
|
||||
description: Log level to use for Kratos logs.
|
||||
global: True
|
||||
helpLink: connect.html
|
||||
helpLink: connect-api
|
||||
format:
|
||||
description: Log output format for Kratos logs.
|
||||
global: True
|
||||
helpLink: connect.html
|
||||
helpLink: connect-api
|
||||
secrets:
|
||||
system:
|
||||
description: Secrets used for token generation. Generated during installation.
|
||||
@@ -26,4 +27,4 @@ hydra:
|
||||
sensitive: True
|
||||
advanced: True
|
||||
forcedType: "[]string"
|
||||
helpLink: connect.html
|
||||
helpLink: connect-api
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
|
||||
include:
|
||||
- idh.config
|
||||
@@ -20,25 +20,31 @@ so-idh:
|
||||
- network_mode: host
|
||||
- binds:
|
||||
- /nsm/idh:/var/tmp:rw
|
||||
- /opt/so/conf/idh/http-skins:/usr/local/lib/python3.12/site-packages/opencanary/modules/data/http/skin:ro
|
||||
- /opt/so/conf/idh/http-skins:/opt/opencanary/http-skins:ro
|
||||
- /opt/so/conf/idh/opencanary.conf:/etc/opencanaryd/opencanary.conf:ro
|
||||
{% if DOCKER.containers['so-idh'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-idh'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-idh'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-idh'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-idh'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-idh'].extra_hosts %}
|
||||
- extra_hosts:
|
||||
{% for XTRAHOST in DOCKER.containers['so-idh'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-idh'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKER.containers['so-idh'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-idh'].extra_env %}
|
||||
- environment:
|
||||
{% for XTRAENV in DOCKER.containers['so-idh'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-idh'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-idh'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-idh'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- watch:
|
||||
- file: opencanary_config
|
||||
- require:
|
||||
|
||||
@@ -28,6 +28,7 @@
|
||||
{% set HTTPPROXYSKINLIST = OPENCANARYCONFIG.pop('httpproxy_x_skinlist') %}
|
||||
{% do OPENCANARYCONFIG.update({'http_x_skin_x_list': HTTPSKINLIST}) %}
|
||||
{% do OPENCANARYCONFIG.update({'httpproxy_x_skin_x_list': HTTPPROXYSKINLIST}) %}
|
||||
{% do OPENCANARYCONFIG.update({'http_x_skindir': '/opt/opencanary/http-skins/' ~ OPENCANARYCONFIG['http_x_skin']}) %}
|
||||
|
||||
{% set OPENSSH = salt['pillar.get']('idh:openssh', default=IDHCONFIG.idh.openssh, merge=True) %}
|
||||
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
include:
|
||||
- idh.openssh
|
||||
|
||||
{% if grains.os_family == 'RedHat' %}
|
||||
idh_sshd_selinux:
|
||||
selinux.port_policy_present:
|
||||
- port: {{ openssh_map.config.port }}
|
||||
@@ -13,7 +12,6 @@ idh_sshd_selinux:
|
||||
- file: openssh_config
|
||||
- require:
|
||||
- pkg: python_selinux_mgmt_tools
|
||||
{% endif %}
|
||||
|
||||
openssh_config:
|
||||
file.replace:
|
||||
|
||||
@@ -16,8 +16,6 @@ openssh:
|
||||
- name: {{ openssh_map.service }}
|
||||
{% endif %}
|
||||
|
||||
{% if grains.os_family == 'RedHat' %}
|
||||
python_selinux_mgmt_tools:
|
||||
pkg.installed:
|
||||
- name: policycoreutils-python-utils
|
||||
{% endif %}
|
||||
|
||||
29
salt/idh/skins/http/opencanary/basicLogin/redirect.html
Normal file
29
salt/idh/skins/http/opencanary/basicLogin/redirect.html
Normal file
@@ -0,0 +1,29 @@
|
||||
<html>
|
||||
<head>
|
||||
<title>Redirect</title>
|
||||
<style>
|
||||
body {
|
||||
width: 100%;
|
||||
}
|
||||
.outer {
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
width: 25em;
|
||||
height: 100%;
|
||||
}
|
||||
.inner{
|
||||
display: table-cell;
|
||||
vertical-align: middle;
|
||||
height: 30em;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class='outer'>
|
||||
<div class='inner'>
|
||||
<a href="/index">Click here</a>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
29
salt/idh/skins/http/opencanary/nasLogin/redirect.html
Normal file
29
salt/idh/skins/http/opencanary/nasLogin/redirect.html
Normal file
@@ -0,0 +1,29 @@
|
||||
<html>
|
||||
<head>
|
||||
<title>Redirect</title>
|
||||
<style>
|
||||
body {
|
||||
width: 100%;
|
||||
}
|
||||
.outer {
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
width: 25em;
|
||||
height: 100%;
|
||||
}
|
||||
.inner{
|
||||
display: table-cell;
|
||||
vertical-align: middle;
|
||||
height: 30em;
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class='outer'>
|
||||
<div class='inner'>
|
||||
<a href="/index">Click here</a>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
|
||||
@@ -1,7 +1,12 @@
|
||||
idh:
|
||||
enabled:
|
||||
description: Enables or disables the Intrusion Detection Honeypot (IDH) process.
|
||||
helpLink: idh.html
|
||||
description: Enables or disables the Intrusion Detection Honeypot (IDH) process.
|
||||
forcedType: bool
|
||||
helpLink: idh
|
||||
restrict_management_ip:
|
||||
description: Restricts management IP access to the IDH node.
|
||||
forcedType: bool
|
||||
helpLink: idh
|
||||
opencanary:
|
||||
config:
|
||||
logger:
|
||||
@@ -10,7 +15,7 @@ idh:
|
||||
readonly: True
|
||||
advanced: True
|
||||
global: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
kwargs:
|
||||
formatters:
|
||||
plain:
|
||||
@@ -24,53 +29,54 @@ idh:
|
||||
filename: *loggingOptions
|
||||
portscan_x_enabled: &serviceOptions
|
||||
description: To enable this opencanary module, set this value to true. To disable set to false. This option only applies to IDH nodes within your grid.
|
||||
helpLink: idh.html
|
||||
forcedType: bool
|
||||
helpLink: idh
|
||||
portscan_x_logfile: *loggingOptions
|
||||
portscan_x_synrate:
|
||||
description: Portscan - syn rate limiting
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
portscan_x_nmaposrate:
|
||||
description: Portscan - nmap OS rate limiting
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
portscan_x_lorate:
|
||||
description: Portscan - lo rate limiting
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
tcpbanner_x_maxnum:
|
||||
description: Portscan - maxnum
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
tcpbanner_x_enabled: *serviceOptions
|
||||
tcpbanner_1_x_enabled: *serviceOptions
|
||||
tcpbanner_1_x_port: &portOptions
|
||||
description: Port the service should listen on.
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
tcpbanner_1_x_datareceivedbanner: &bannerOptions
|
||||
description: Data Received Banner
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
tcpbanner_1_x_initbanner: *bannerOptions
|
||||
tcpbanner_1_x_alertstring_x_enabled: *serviceOptions
|
||||
tcpbanner_1_x_keep_alive_x_enabled: *serviceOptions
|
||||
tcpbanner_1_x_keep_alive_secret:
|
||||
description: Keep Alive Secret
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
tcpbanner_1_x_keep_alive_probes:
|
||||
description: Keep Alive Probes
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
tcpbanner_1_x_keep_alive_interval:
|
||||
description: Keep Alive Interval
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
tcpbanner_1_x_keep_alive_idle:
|
||||
description: Keep Alive Idle
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
ftp_x_enabled: *serviceOptions
|
||||
ftp_x_port: *portOptions
|
||||
ftp_x_banner: *bannerOptions
|
||||
@@ -82,11 +88,11 @@ idh:
|
||||
http_x_skin: &skinOptions
|
||||
description: HTTP skin
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
http_x_skinlist: &skinlistOptions
|
||||
description: List of skins to use for the service.
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
httpproxy_x_enabled: *serviceOptions
|
||||
httpproxy_x_port: *portOptions
|
||||
httpproxy_x_skin: *skinOptions
|
||||
@@ -95,7 +101,7 @@ idh:
|
||||
mssql_x_version: &versionOptions
|
||||
description: Specify the version the service should present.
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
mssql_x_port: *portOptions
|
||||
mysql_x_enabled: *serviceOptions
|
||||
mysql_x_port: *portOptions
|
||||
@@ -119,16 +125,17 @@ idh:
|
||||
telnet_x_honeycreds:
|
||||
description: Credentials list for the telnet service.
|
||||
advanced: True
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
tftp_x_enabled: *serviceOptions
|
||||
tftp_x_port: *portOptions
|
||||
vnc_x_enabled: *serviceOptions
|
||||
vnc_x_port: *portOptions
|
||||
openssh:
|
||||
enable:
|
||||
enable:
|
||||
description: This is the real SSH service for the host machine.
|
||||
helpLink: idh.html
|
||||
forcedType: bool
|
||||
helpLink: idh
|
||||
config:
|
||||
port:
|
||||
description: Port that the real SSH service will listen on and will only be accessible from the manager.
|
||||
helpLink: idh.html
|
||||
helpLink: idh
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{% set PASSWORD = salt['pillar.get']('secrets:influx_pass') %}
|
||||
{% set TOKEN = salt['pillar.get']('influxdb:token') %}
|
||||
|
||||
@@ -21,7 +21,7 @@ so-influxdb:
|
||||
- hostname: influxdb
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-influxdb'].ip }}
|
||||
- environment:
|
||||
- INFLUXD_CONFIG_PATH=/conf/config.yaml
|
||||
- INFLUXDB_HTTP_LOG_ENABLED=false
|
||||
@@ -31,8 +31,8 @@ so-influxdb:
|
||||
- DOCKER_INFLUXDB_INIT_ORG=Security Onion
|
||||
- DOCKER_INFLUXDB_INIT_BUCKET=telegraf/so_short_term
|
||||
- DOCKER_INFLUXDB_INIT_ADMIN_TOKEN={{ TOKEN }}
|
||||
{% if DOCKER.containers['so-influxdb'].extra_env %}
|
||||
{% for XTRAENV in DOCKER.containers['so-influxdb'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-influxdb'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-influxdb'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
@@ -43,21 +43,27 @@ so-influxdb:
|
||||
- /nsm/influxdb:/var/lib/influxdb2:rw
|
||||
- /etc/pki/influxdb.crt:/conf/influxdb.crt:ro
|
||||
- /etc/pki/influxdb.key:/conf/influxdb.key:ro
|
||||
{% if DOCKER.containers['so-influxdb'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-influxdb'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-influxdb'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-influxdb'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-influxdb'].port_bindings %}
|
||||
{% for BINDING in DOCKERMERGED.containers['so-influxdb'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
{% if DOCKER.containers['so-influxdb'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-influxdb'].extra_hosts %}
|
||||
- extra_hosts:
|
||||
{% for XTRAHOST in DOCKER.containers['so-influxdb'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-influxdb'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-influxdb'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-influxdb'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- watch:
|
||||
- file: influxdbconf
|
||||
- x509: influxdb_key
|
||||
|
||||
@@ -1,358 +1,372 @@
|
||||
influxdb:
|
||||
enabled:
|
||||
description: Enables the grid metrics collection storage system. Security Onion grid health monitoring requires this process to remain enabled. WARNING - Disabling the process is unsupported, and will cause unexpected results.
|
||||
helpLink: influxdb.html
|
||||
forcedType: bool
|
||||
helpLink: influxdb
|
||||
config:
|
||||
assets-path:
|
||||
description: Path to the InfluxDB user interface assets located inside the so-influxdb container.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
bolt-path:
|
||||
description: Path to the bolt DB file located inside the so-influxdb container.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
engine-path:
|
||||
description: Path to the engine directory located inside the so-influxdb container. This directory stores the time series data.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
feature-flags:
|
||||
description: List of key=value flags to enable.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
flux-log-enabled:
|
||||
description: Controls whether detailed flux query logging is enabled.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
hardening-enabled:
|
||||
description: If true, enforces outbound connections from the InfluxDB process must never attempt to reach an internal, private network address.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
http-bind-address:
|
||||
description: The URL and port on which InfluxDB will listen for new connections.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
http-idle-timeout:
|
||||
description: Keep-alive timeout while a connection waits for new requests. A value of 0 is the same as no timeout enforced.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
http-read-header-timeout:
|
||||
description: The duration to wait for a request header before closing the connection. A value of 0 is the same as no timeout enforced.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
http-read-timeout:
|
||||
description: The duration to wait for the request to be fully read before closing the connection. A value of 0 is the same as no timeout enforced.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
http-write-timeout:
|
||||
description: The duration to wait for the response to be fully written before closing the connection. A value of 0 is the same as no timeout enforced.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
influxql-max-select-buckets:
|
||||
description: Maximum number of group-by clauses in a SELECT statement. A value of 0 is the same as unlimited.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
influxql-max-select-point:
|
||||
description: Maximum number of points that can be queried in a SELECT statement. A value of 0 is the same as unlimited.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
influxql-max-select-series:
|
||||
description: Maximum number of series that can be returned in a SELECT statement. A value of 0 is the same as unlimited.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
instance-id:
|
||||
description: Unique instance ID for this server, to avoid collisions in a replicated cluster.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
log-level:
|
||||
description: The log level to use for outputting log statements. Allowed values are debug, info, or error.
|
||||
global: True
|
||||
advanced: false
|
||||
regex: ^(info|debug|error)$
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
metrics-disabled:
|
||||
description: If true, the HTTP endpoint that exposes internal InfluxDB metrics will be inaccessible.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
no-tasks:
|
||||
description: If true, the task system will not process any queued tasks. Useful for troubleshooting startup problems.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
pprof-disabled:
|
||||
description: If true, the profiling data HTTP endpoint will be inaccessible.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
query-concurrency:
|
||||
description: Maximum number of queries to execute concurrently. A value of 0 is the same as unlimited.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
query-initial-memory-bytes:
|
||||
description: The initial number of bytes of memory to allocate for a new query.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
query-max-memory-bytes:
|
||||
description: The number of bytes of memory to allocate to all running queries. Should typically be the query bytes times the max concurrent queries.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
query-memory-bytes:
|
||||
description: Maximum number of bytes of memory to allocate to a query.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
query-queue-size:
|
||||
description: Maximum number of queries that can be queued at one time. If this value is reached, new queries will not be queued. A value of 0 is the same as unlimited.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
reporting-disabled:
|
||||
description: If true, prevents InfluxDB from sending telemetry updates to InfluxData's servers.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
secret-store:
|
||||
description: Determines the type of storage used for secrets. Allowed values are bolt or vault.
|
||||
global: True
|
||||
advanced: True
|
||||
regex: ^(bolt|vault)$
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
session-length:
|
||||
description: Number of minutes that a user login session can remain authenticated.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
session-renew-disabled:
|
||||
description: If true, user login sessions will renew after each request.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
sqlite-path:
|
||||
description: Path to the Sqlite3 database inside the container. This database stored user data and other information about the database.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-cache-max-memory-size:
|
||||
description: Maximum number of bytes to allocate to cache data per shard. If exceeded, new data writes will be rejected.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-cache-snapshot-memory-size:
|
||||
description: Number of bytes to allocate to cache snapshot data. When the cache reaches this size, it will be written to disk to increase available memory.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-cache-snapshot-write-cold-duration:
|
||||
description: Duration between snapshot writes to disk when the shard data hasn't been modified.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-compact-full-write-cold-duration:
|
||||
description: Duration between shard compactions when the shard data hasn't been modified.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-compact-throughput-burst:
|
||||
description: Maximum throughput (number of bytes per second) that compactions be written to disk.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-max-concurrent-compactions:
|
||||
description: Maximum number of concurrent compactions. A value of 0 is the same as half the available CPU processors (procs).
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-max-index-log-file-size:
|
||||
description: Maximum number of bytes of a write-ahead log (WAL) file before it will be compacted into an index on disk.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-no-validate-field-size:
|
||||
description: If true, incoming requests will skip the field size validation.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-retention-check-interval:
|
||||
description: Interval between reviewing each bucket's retention policy and the age of the associated data.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-series-file-max-concurrent-snapshot-compactions:
|
||||
description: Maximum number of concurrent snapshot compactions across all database partitions.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-series-id-set-cache-size:
|
||||
description: Maximum size of the series cache results. Higher values may increase performance for repeated data lookups.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-shard-precreator-advance-period:
|
||||
description: The duration before a successor shard group is created after the end-time has been reached.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-shard-precreator-check-interval:
|
||||
description: Interval between checking if new shards should be created.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-tsm-use-madv-willneed:
|
||||
description: If true, InfluxDB will manage TSM memory paging.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-validate-keys:
|
||||
description: If true, validates incoming requests for supported characters.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-wal-fsync-delay:
|
||||
description: Duration to wait before calling fsync. Useful for handling conflicts on slower disks.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-wal-max-concurrent-writes:
|
||||
description: Maximum number of concurrent write-ahead log (WAL) writes to disk. The value of 0 is the same as CPU processors (procs) x 2.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-wal-max-write-delay:
|
||||
description: Maximum duration to wait before writing the write-ahead log (WAL) to disk, when the concurrency limit has been exceeded. A value of 0 is the same as no timeout.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
storage-write-timeout:
|
||||
description: Maximum time to wait for a write-ahead log (WAL) to write to disk before aborting.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
store:
|
||||
description: The type of data store to use for HTTP resources. Allowed values are disk or memory. Memory should not be used for production Security Onion installations.
|
||||
global: True
|
||||
advanced: True
|
||||
regex: ^(disk|memory)$
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
tls-cert:
|
||||
description: The container path to the certificate to use for TLS encryption of the HTTP requests and responses.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
tls-key:
|
||||
description: The container path to the certificate key to use for TLS encryption of the HTTP requests and responses.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
tls-min-version:
|
||||
description: The minimum supported version of TLS to be enforced on all incoming HTTP requests.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
tls-strict-ciphers:
|
||||
description: If true, the allowed ciphers used with TLS connections are ECDHE_RSA_WITH_AES_256_GCM_SHA384, ECDHE_RSA_WITH_AES_256_CBC_SHA, RSA_WITH_AES_256_GCM_SHA384, or RSA_WITH_AES_256_CBC_SHA.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
tracing-type:
|
||||
description: The tracing format for debugging purposes. Allowed values are log or jaeger, or leave blank to disable tracing.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
ui-disabled:
|
||||
helpLink: influxdb
|
||||
ui-disabled:
|
||||
description: If true, the InfluxDB HTTP user interface will be disabled. This will prevent use of the included InfluxDB dashboard visualizations.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-addr:
|
||||
description: Vault server address.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-cacert:
|
||||
description: Path to the Vault's single certificate authority certificate file within the container.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-capath:
|
||||
description: Path to the Vault's certificate authority directory within the container.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-client-cert:
|
||||
description: Vault client certificate path within the container.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-client-key:
|
||||
description: Vault client certificate key path within the container.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-client-timeout:
|
||||
description: Duration to wait for a response from the Vault server before aborting.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-max-retries:
|
||||
description: Maximum number of retries when attempting to contact the Vault server. A value of 0 is the same as disabling retries.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
vault-skip-verify:
|
||||
helpLink: influxdb
|
||||
vault-skip-verify:
|
||||
description: Skip certification validation of the Vault server.
|
||||
forcedType: bool
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-tls-server-name:
|
||||
description: SNI host to specify when using TLS to connect to the Vault server.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
vault-token:
|
||||
description: Vault token used for authentication.
|
||||
global: True
|
||||
advanced: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
buckets:
|
||||
so_short_term:
|
||||
duration:
|
||||
description: Amount of time (in seconds) to keep short term data.
|
||||
global: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
shard_duration:
|
||||
description: Amount of the time (in seconds) range covered by the shard group.
|
||||
global: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
so_long_term:
|
||||
duration:
|
||||
description: Amount of time (in seconds) to keep long term downsampled data.
|
||||
global: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
shard_duration:
|
||||
description: Amount of the time (in seconds) range covered by the shard group.
|
||||
global: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
downsample:
|
||||
so_long_term:
|
||||
resolution:
|
||||
description: Amount of time to turn into a single data point.
|
||||
global: True
|
||||
helpLink: influxdb.html
|
||||
helpLink: influxdb
|
||||
|
||||
@@ -1,27 +0,0 @@
|
||||
[{
|
||||
"apiVersion": "influxdata.com/v2alpha1",
|
||||
"kind": "CheckThreshold",
|
||||
"metadata": {
|
||||
"name": "steno-packet-loss"
|
||||
},
|
||||
"spec": {
|
||||
"description": "Triggers when the average percent of packet loss is above the defined threshold. To tune this alert, modify the value for the appropriate alert level.",
|
||||
"every": "1m",
|
||||
"name": "Stenographer Packet Loss",
|
||||
"query": "from(bucket: \"telegraf/so_short_term\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"stenodrop\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"drop\")\n |\u003e aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |\u003e yield(name: \"mean\")",
|
||||
"status": "active",
|
||||
"statusMessageTemplate": "Stenographer Packet Loss on node ${r.host} has reached the ${ r._level } threshold. The current packet loss is ${ r.drop }%.",
|
||||
"thresholds": [
|
||||
{
|
||||
"level": "CRIT",
|
||||
"type": "greater",
|
||||
"value": 5
|
||||
},
|
||||
{
|
||||
"level": "WARN",
|
||||
"type": "greater",
|
||||
"value": 3
|
||||
}
|
||||
]
|
||||
}
|
||||
}]
|
||||
File diff suppressed because one or more lines are too long
@@ -12,7 +12,7 @@
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{% set KAFKANODES = salt['pillar.get']('kafka:nodes') %}
|
||||
{% set KAFKA_EXTERNAL_ACCESS = salt['pillar.get']('kafka:config:external_access:enabled', default=False) %}
|
||||
{% if 'gmd' in salt['pillar.get']('features', []) %}
|
||||
@@ -31,22 +31,22 @@ so-kafka:
|
||||
- name: so-kafka
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-kafka'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-kafka'].ip }}
|
||||
- user: kafka
|
||||
- environment:
|
||||
KAFKA_HEAP_OPTS: -Xmx2G -Xms1G
|
||||
KAFKA_OPTS: "-javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKER.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml {%- if KAFKA_EXTERNAL_ACCESS %} -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf {% endif -%}"
|
||||
KAFKA_OPTS: "-javaagent:/opt/jolokia/agents/jolokia-agent-jvm-javaagent.jar=port=8778,host={{ DOCKERMERGED.containers['so-kafka'].ip }},policyLocation=file:/opt/jolokia/jolokia.xml {%- if KAFKA_EXTERNAL_ACCESS %} -Djava.security.auth.login.config=/opt/kafka/config/kafka_server_jaas.conf {% endif -%}"
|
||||
- extra_hosts:
|
||||
{% for node in KAFKANODES %}
|
||||
- {{ node }}:{{ KAFKANODES[node].ip }}
|
||||
{% endfor %}
|
||||
{% if DOCKER.containers['so-kafka'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-kafka'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-kafka'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-kafka'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-kafka'].port_bindings %}
|
||||
{% for BINDING in DOCKERMERGED.containers['so-kafka'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
- binds:
|
||||
@@ -60,6 +60,12 @@ so-kafka:
|
||||
{% if KAFKA_EXTERNAL_ACCESS %}
|
||||
- /opt/so/conf/kafka/kafka_server_jaas.conf:/opt/kafka/config/kafka_server_jaas.conf:ro
|
||||
{% endif %}
|
||||
{% if DOCKERMERGED.containers['so-kafka'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-kafka'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- watch:
|
||||
{% for sc in ['server', 'client'] %}
|
||||
- file: kafka_kraft_{{sc}}_properties
|
||||
|
||||
@@ -1,257 +1,258 @@
|
||||
kafka:
|
||||
enabled:
|
||||
description: Set to True to enable Kafka. To avoid grid problems, do not enable Kafka until the related configuration is in place. Requires a valid Security Onion license key.
|
||||
helpLink: kafka.html
|
||||
forcedType: bool
|
||||
helpLink: kafka
|
||||
cluster_id:
|
||||
description: The ID of the Kafka cluster.
|
||||
readonly: True
|
||||
advanced: True
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
controllers:
|
||||
description: A comma-separated list of hostnames that will act as Kafka controllers. These hosts will be responsible for managing the Kafka cluster. Note that only manager and receiver nodes are eligible to run Kafka. This configuration needs to be set before enabling Kafka. Failure to do so may result in Kafka topics becoming unavailable requiring manual intervention to restore functionality or reset Kafka, either of which can result in data loss.
|
||||
forcedType: string
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
reset:
|
||||
description: Disable and reset the Kafka cluster. This will remove all Kafka data including logs that may have not yet been ingested into Elasticsearch and reverts the grid to using REDIS as the global pipeline. This is useful when testing different Kafka configurations such as rearranging Kafka brokers / controllers allowing you to reset the cluster rather than manually fixing any issues arising from attempting to reassign a Kafka broker into a controller. Enter 'YES_RESET_KAFKA' and submit to disable and reset Kafka. Make any configuration changes required and re-enable Kafka when ready. This action CANNOT be reversed.
|
||||
advanced: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
logstash:
|
||||
description: By default logstash is disabled when Kafka is enabled. This option allows you to specify any hosts you would like to re-enable logstash on alongside Kafka.
|
||||
forcedType: "[]string"
|
||||
multiline: True
|
||||
advanced: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
config:
|
||||
password:
|
||||
description: The password used for the Kafka certificates.
|
||||
readonly: True
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
trustpass:
|
||||
description: The password used for the Kafka truststore.
|
||||
readonly: True
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
broker:
|
||||
auto_x_create_x_topics_x_enable:
|
||||
description: Enable the auto creation of topics.
|
||||
title: auto.create.topics.enable
|
||||
forcedType: bool
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
default_x_replication_x_factor:
|
||||
description: The default replication factor for automatically created topics. This value must be less than the amount of brokers in the cluster. Hosts specified in controllers should not be counted towards total broker count.
|
||||
title: default.replication.factor
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
inter_x_broker_x_listener_x_name:
|
||||
description: The name of the listener used for inter-broker communication.
|
||||
title: inter.broker.listener.name
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
listeners:
|
||||
description: Set of URIs that is listened on and the listener names in a comma-seperated list.
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
listener_x_security_x_protocol_x_map:
|
||||
description: Comma-seperated mapping of listener name and security protocols.
|
||||
title: listener.security.protocol.map
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
log_x_dirs:
|
||||
description: Where Kafka logs are stored within the Docker container.
|
||||
title: log.dirs
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
log_x_retention_x_check_x_interval_x_ms:
|
||||
description: Frequency at which log files are checked if they are qualified for deletion.
|
||||
title: log.retention.check.interval.ms
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
log_x_retention_x_hours:
|
||||
description: How long, in hours, a log file is kept.
|
||||
title: log.retention.hours
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
log_x_segment_x_bytes:
|
||||
description: The maximum allowable size for a log file.
|
||||
title: log.segment.bytes
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
num_x_io_x_threads:
|
||||
description: The number of threads used by Kafka.
|
||||
title: num.io.threads
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
num_x_network_x_threads:
|
||||
description: The number of threads used for network communication.
|
||||
title: num.network.threads
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
num_x_partitions:
|
||||
description: The number of log partitions assigned per topic.
|
||||
title: num.partitions
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
num_x_recovery_x_threads_x_per_x_data_x_dir:
|
||||
description: The number of threads used for log recuperation at startup and purging at shutdown. This ammount of threads is used per data directory.
|
||||
title: num.recovery.threads.per.data.dir
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
offsets_x_topic_x_replication_x_factor:
|
||||
description: The offsets topic replication factor.
|
||||
title: offsets.topic.replication.factor
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
process_x_roles:
|
||||
description: The role performed by Kafka brokers.
|
||||
title: process.roles
|
||||
readonly: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
socket_x_receive_x_buffer_x_bytes:
|
||||
description: Size, in bytes of the SO_RCVBUF buffer. A value of -1 will use the OS default.
|
||||
title: socket.receive.buffer.bytes
|
||||
#forcedType: int - soc needs to allow -1 as an int before we can use this
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
socket_x_request_x_max_x_bytes:
|
||||
description: The maximum bytes allowed for a request to the socket.
|
||||
title: socket.request.max.bytes
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
socket_x_send_x_buffer_x_bytes:
|
||||
description: Size, in bytes of the SO_SNDBUF buffer. A value of -1 will use the OS default.
|
||||
title: socket.send.buffer.byte
|
||||
#forcedType: int - soc needs to allow -1 as an int before we can use this
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_keystore_x_location:
|
||||
description: The key store file location within the Docker container.
|
||||
title: ssl.keystore.location
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_keystore_x_password:
|
||||
description: The key store file password. Invalid for PEM format.
|
||||
title: ssl.keystore.password
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_keystore_x_type:
|
||||
description: The key store file format.
|
||||
title: ssl.keystore.type
|
||||
regex: ^(JKS|PKCS12|PEM)$
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_truststore_x_location:
|
||||
description: The trust store file location within the Docker container.
|
||||
title: ssl.truststore.location
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_truststore_x_type:
|
||||
description: The trust store file format.
|
||||
title: ssl.truststore.type
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_truststore_x_password:
|
||||
description: The trust store file password. If null, the trust store file is still use, but integrity checking is disabled. Invalid for PEM format.
|
||||
title: ssl.truststore.password
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
transaction_x_state_x_log_x_min_x_isr:
|
||||
description: Overrides min.insync.replicas for the transaction topic. When a producer configures acks to "all" (or "-1"), this setting determines the minimum number of replicas required to acknowledge a write as successful. Failure to meet this minimum triggers an exception (either NotEnoughReplicas or NotEnoughReplicasAfterAppend). When used in conjunction, min.insync.replicas and acks enable stronger durability guarantees. For instance, creating a topic with a replication factor of 3, setting min.insync.replicas to 2, and using acks of "all" ensures that the producer raises an exception if a majority of replicas fail to receive a write.
|
||||
title: transaction.state.log.min.isr
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
transaction_x_state_x_log_x_replication_x_factor:
|
||||
description: Set the replication factor higher for the transaction topic to ensure availability. Internal topic creation will not proceed until the cluster size satisfies this replication factor prerequisite.
|
||||
title: transaction.state.log.replication.factor
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
client:
|
||||
security_x_protocol:
|
||||
description: 'Broker communication protocol. Options are: SASL_SSL, PLAINTEXT, SSL, SASL_PLAINTEXT'
|
||||
title: security.protocol
|
||||
regex: ^(SASL_SSL|PLAINTEXT|SSL|SASL_PLAINTEXT)
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_keystore_x_location:
|
||||
description: The key store file location within the Docker container.
|
||||
title: ssl.keystore.location
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_keystore_x_password:
|
||||
description: The key store file password. Invalid for PEM format.
|
||||
title: ssl.keystore.password
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_keystore_x_type:
|
||||
description: The key store file format.
|
||||
title: ssl.keystore.type
|
||||
regex: ^(JKS|PKCS12|PEM)$
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_truststore_x_location:
|
||||
description: The trust store file location within the Docker container.
|
||||
title: ssl.truststore.location
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_truststore_x_type:
|
||||
description: The trust store file format.
|
||||
title: ssl.truststore.type
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
ssl_x_truststore_x_password:
|
||||
description: The trust store file password. If null, the trust store file is still use, but integrity checking is disabled. Invalid for PEM format.
|
||||
title: ssl.truststore.password
|
||||
sensitive: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
controller:
|
||||
controller_x_listener_x_names:
|
||||
description: Set listeners used by the controller in a comma-seperated list.
|
||||
title: controller.listener.names
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
listeners:
|
||||
description: Set of URIs that is listened on and the listener names in a comma-seperated list.
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
listener_x_security_x_protocol_x_map:
|
||||
description: Comma-seperated mapping of listener name and security protocols.
|
||||
title: listener.security.protocol.map
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
log_x_dirs:
|
||||
description: Where Kafka logs are stored within the Docker container.
|
||||
title: log.dirs
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
log_x_retention_x_check_x_interval_x_ms:
|
||||
description: Frequency at which log files are checked if they are qualified for deletion.
|
||||
title: log.retention.check.interval.ms
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
log_x_retention_x_hours:
|
||||
description: How long, in hours, a log file is kept.
|
||||
title: log.retention.hours
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
log_x_segment_x_bytes:
|
||||
description: The maximum allowable size for a log file.
|
||||
title: log.segment.bytes
|
||||
forcedType: int
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
process_x_roles:
|
||||
description: The role performed by controller node.
|
||||
title: process.roles
|
||||
readonly: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
external_access:
|
||||
enabled:
|
||||
description: Enables or disables access to Kafka topics using user/password authentication. Used for producing / consuming messages via an external client.
|
||||
forcedType: bool
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
listeners:
|
||||
description: Set of URIs that is listened on and the listener names in a comma-seperated list.
|
||||
title: listeners
|
||||
readonly: True
|
||||
advanced: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
listener_x_security_x_protocol_x_map:
|
||||
description: External listener name and mapped security protocol.
|
||||
title: listener.security.protocol.map
|
||||
readonly: True
|
||||
advanced: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
sasl_x_enabled_x_mechanisms:
|
||||
description: SASL/PLAIN is a simple username/password authentication mechanism, used with TLS to implement secure authentication.
|
||||
title: sasl.enabled.mechanisms
|
||||
readonly: True
|
||||
advanced: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
sasl_x_mechanism_x_inter_x_broker_x_protocol:
|
||||
description: SASL mechanism used for inter-broker communication
|
||||
title: sasl.mechanism.inter.broker.protocol
|
||||
readonly: True
|
||||
advanced: True
|
||||
helpLink: kafka.html
|
||||
helpLink: kafka
|
||||
remote_users:
|
||||
user01: &remote_user
|
||||
username:
|
||||
|
||||
@@ -5,7 +5,7 @@
|
||||
|
||||
{% from 'allowed_states.map.jinja' import allowed_states %}
|
||||
{% if sls.split('.')[0] in allowed_states %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKER %}
|
||||
{% from 'docker/docker.map.jinja' import DOCKERMERGED %}
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
|
||||
include:
|
||||
@@ -20,20 +20,20 @@ so-kibana:
|
||||
- user: kibana
|
||||
- networks:
|
||||
- sobridge:
|
||||
- ipv4_address: {{ DOCKER.containers['so-kibana'].ip }}
|
||||
- ipv4_address: {{ DOCKERMERGED.containers['so-kibana'].ip }}
|
||||
- environment:
|
||||
- ELASTICSEARCH_HOST={{ GLOBALS.manager }}
|
||||
- ELASTICSEARCH_PORT=9200
|
||||
- MANAGER={{ GLOBALS.manager }}
|
||||
{% if DOCKER.containers['so-kibana'].extra_env %}
|
||||
{% for XTRAENV in DOCKER.containers['so-kibana'].extra_env %}
|
||||
{% if DOCKERMERGED.containers['so-kibana'].extra_env %}
|
||||
{% for XTRAENV in DOCKERMERGED.containers['so-kibana'].extra_env %}
|
||||
- {{ XTRAENV }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- extra_hosts:
|
||||
- {{ GLOBALS.manager }}:{{ GLOBALS.manager_ip }}
|
||||
{% if DOCKER.containers['so-kibana'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKER.containers['so-kibana'].extra_hosts %}
|
||||
{% if DOCKERMERGED.containers['so-kibana'].extra_hosts %}
|
||||
{% for XTRAHOST in DOCKERMERGED.containers['so-kibana'].extra_hosts %}
|
||||
- {{ XTRAHOST }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
@@ -42,15 +42,21 @@ so-kibana:
|
||||
- /opt/so/log/kibana:/var/log/kibana:rw
|
||||
- /opt/so/conf/kibana/customdashboards:/usr/share/kibana/custdashboards:ro
|
||||
- /sys/fs/cgroup:/sys/fs/cgroup:ro
|
||||
{% if DOCKER.containers['so-kibana'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKER.containers['so-kibana'].custom_bind_mounts %}
|
||||
{% if DOCKERMERGED.containers['so-kibana'].custom_bind_mounts %}
|
||||
{% for BIND in DOCKERMERGED.containers['so-kibana'].custom_bind_mounts %}
|
||||
- {{ BIND }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- port_bindings:
|
||||
{% for BINDING in DOCKER.containers['so-kibana'].port_bindings %}
|
||||
{% for BINDING in DOCKERMERGED.containers['so-kibana'].port_bindings %}
|
||||
- {{ BINDING }}
|
||||
{% endfor %}
|
||||
{% if DOCKERMERGED.containers['so-kibana'].ulimits %}
|
||||
- ulimits:
|
||||
{% for ULIMIT in DOCKERMERGED.containers['so-kibana'].ulimits %}
|
||||
- {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
|
||||
{% endfor %}
|
||||
{% endif %}
|
||||
- watch:
|
||||
- file: kibanaconfig
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@
|
||||
|
||||
{% from 'vars/globals.map.jinja' import GLOBALS %}
|
||||
{% import_yaml 'kibana/defaults.yaml' as KIBANADEFAULTS with context %}
|
||||
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
|
||||
|
||||
{% do KIBANADEFAULTS.kibana.config.server.update({'publicBaseUrl': 'https://' ~ GLOBALS.url_base ~ '/kibana'}) %}
|
||||
{% do KIBANADEFAULTS.kibana.config.elasticsearch.update({'hosts': ['https://' ~ GLOBALS.manager ~ ':9200']}) %}
|
||||
|
||||
@@ -3,7 +3,6 @@
|
||||
# https://securityonion.net/license; you may not use this file except in compliance with the
|
||||
# Elastic License 2.0.
|
||||
|
||||
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
|
||||
include:
|
||||
- kibana.enabled
|
||||
|
||||
@@ -29,27 +28,3 @@ so-kibana-dashboard-load:
|
||||
- require:
|
||||
- sls: kibana.enabled
|
||||
- file: dashboard_saved_objects_template
|
||||
{%- if HIGHLANDER %}
|
||||
dashboard_saved_objects_template_hl:
|
||||
file.managed:
|
||||
- name: /opt/so/conf/kibana/hl.ndjson.template
|
||||
- source: salt://kibana/files/hl.ndjson
|
||||
- user: 932
|
||||
- group: 939
|
||||
- show_changes: False
|
||||
|
||||
dashboard_saved_objects_hl_changes:
|
||||
file.absent:
|
||||
- names:
|
||||
- /opt/so/state/kibana_hl.txt
|
||||
- onchanges:
|
||||
- file: dashboard_saved_objects_template_hl
|
||||
|
||||
so-kibana-dashboard-load_hl:
|
||||
cmd.run:
|
||||
- name: /usr/sbin/so-kibana-config-load -i /opt/so/conf/kibana/hl.ndjson.template
|
||||
- cwd: /opt/so
|
||||
- require:
|
||||
- sls: kibana.enabled
|
||||
- file: dashboard_saved_objects_template_hl
|
||||
{%- endif %}
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user