diff --git a/.github/.gitleaks.toml b/.github/.gitleaks.toml new file mode 100644 index 000000000..3ac46149b --- /dev/null +++ b/.github/.gitleaks.toml @@ -0,0 +1,546 @@ +title = "gitleaks config" + +# Gitleaks rules are defined by regular expressions and entropy ranges. +# Some secrets have unique signatures which make detecting those secrets easy. +# Examples of those secrets would be GitLab Personal Access Tokens, AWS keys, and GitHub Access Tokens. +# All these examples have defined prefixes like `glpat`, `AKIA`, `ghp_`, etc. +# +# Other secrets might just be a hash which means we need to write more complex rules to verify +# that what we are matching is a secret. +# +# Here is an example of a semi-generic secret +# +# discord_client_secret = "8dyfuiRyq=vVc3RRr_edRk-fK__JItpZ" +# +# We can write a regular expression to capture the variable name (identifier), +# the assignment symbol (like '=' or ':='), and finally the actual secret. +# The structure of a rule to match this example secret is below: +# +# Beginning string +# quotation +# │ End string quotation +# │ │ +# ▼ ▼ +# (?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_\-]{32})['\"] +# +# ▲ ▲ ▲ +# │ │ │ +# │ │ │ +# identifier assignment symbol +# Secret +# +[[rules]] +id = "gitlab-pat" +description = "GitLab Personal Access Token" +regex = '''glpat-[0-9a-zA-Z\-\_]{20}''' + +[[rules]] +id = "aws-access-token" +description = "AWS" +regex = '''(A3T[A-Z0-9]|AKIA|AGPA|AIDA|AROA|AIPA|ANPA|ANVA|ASIA)[A-Z0-9]{16}''' + +# Cryptographic keys +[[rules]] +id = "PKCS8-PK" +description = "PKCS8 private key" +regex = '''-----BEGIN PRIVATE KEY-----''' + +[[rules]] +id = "RSA-PK" +description = "RSA private key" +regex = '''-----BEGIN RSA PRIVATE KEY-----''' + +[[rules]] +id = "OPENSSH-PK" +description = "SSH private key" +regex = '''-----BEGIN OPENSSH PRIVATE KEY-----''' + +[[rules]] +id = "PGP-PK" +description = "PGP private key" +regex = '''-----BEGIN PGP PRIVATE KEY BLOCK-----''' + +[[rules]] 
+id = "github-pat" +description = "GitHub Personal Access Token" +regex = '''ghp_[0-9a-zA-Z]{36}''' + +[[rules]] +id = "github-oauth" +description = "GitHub OAuth Access Token" +regex = '''gho_[0-9a-zA-Z]{36}''' + +[[rules]] +id = "SSH-DSA-PK" +description = "SSH (DSA) private key" +regex = '''-----BEGIN DSA PRIVATE KEY-----''' + +[[rules]] +id = "SSH-EC-PK" +description = "SSH (EC) private key" +regex = '''-----BEGIN EC PRIVATE KEY-----''' + + +[[rules]] +id = "github-app-token" +description = "GitHub App Token" +regex = '''(ghu|ghs)_[0-9a-zA-Z]{36}''' + +[[rules]] +id = "github-refresh-token" +description = "GitHub Refresh Token" +regex = '''ghr_[0-9a-zA-Z]{76}''' + +[[rules]] +id = "shopify-shared-secret" +description = "Shopify shared secret" +regex = '''shpss_[a-fA-F0-9]{32}''' + +[[rules]] +id = "shopify-access-token" +description = "Shopify access token" +regex = '''shpat_[a-fA-F0-9]{32}''' + +[[rules]] +id = "shopify-custom-access-token" +description = "Shopify custom app access token" +regex = '''shpca_[a-fA-F0-9]{32}''' + +[[rules]] +id = "shopify-private-app-access-token" +description = "Shopify private app access token" +regex = '''shppa_[a-fA-F0-9]{32}''' + +[[rules]] +id = "slack-access-token" +description = "Slack token" +regex = '''xox[baprs]-([0-9a-zA-Z]{10,48})?''' + +[[rules]] +id = "stripe-access-token" +description = "Stripe" +regex = '''(?i)(sk|pk)_(test|live)_[0-9a-z]{10,32}''' + +[[rules]] +id = "pypi-upload-token" +description = "PyPI upload token" +regex = '''pypi-AgEIcHlwaS5vcmc[A-Za-z0-9\-_]{50,1000}''' + +[[rules]] +id = "gcp-service-account" +description = "Google (GCP) Service-account" +regex = '''\"type\": \"service_account\"''' + +[[rules]] +id = "heroku-api-key" +description = "Heroku API Key" +regex = ''' (?i)(heroku[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9A-F]{8}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{4}-[0-9A-F]{12})['\"]''' +secretGroup = 3 + +[[rules]] +id = "slack-web-hook" +description = "Slack Webhook" +regex = 
'''https://hooks.slack.com/services/T[a-zA-Z0-9_]{8}/B[a-zA-Z0-9_]{8,12}/[a-zA-Z0-9_]{24}''' + +[[rules]] +id = "twilio-api-key" +description = "Twilio API Key" +regex = '''SK[0-9a-fA-F]{32}''' + +[[rules]] +id = "age-secret-key" +description = "Age secret key" +regex = '''AGE-SECRET-KEY-1[QPZRY9X8GF2TVDW0S3JN54KHCE6MUA7L]{58}''' + +[[rules]] +id = "facebook-token" +description = "Facebook token" +regex = '''(?i)(facebook[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "twitter-token" +description = "Twitter token" +regex = '''(?i)(twitter[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{35,44})['\"]''' +secretGroup = 3 + +[[rules]] +id = "adobe-client-id" +description = "Adobe Client ID (Oauth Web)" +regex = '''(?i)(adobe[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "adobe-client-secret" +description = "Adobe Client Secret" +regex = '''(p8e-)(?i)[a-z0-9]{32}''' + +[[rules]] +id = "alibaba-access-key-id" +description = "Alibaba AccessKey ID" +regex = '''(LTAI)(?i)[a-z0-9]{20}''' + +[[rules]] +id = "alibaba-secret-key" +description = "Alibaba Secret Key" +regex = '''(?i)(alibaba[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{30})['\"]''' +secretGroup = 3 + +[[rules]] +id = "asana-client-id" +description = "Asana Client ID" +regex = '''(?i)(asana[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9]{16})['\"]''' +secretGroup = 3 + +[[rules]] +id = "asana-client-secret" +description = "Asana Client Secret" +regex = '''(?i)(asana[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "atlassian-api-token" +description = "Atlassian API token" +regex = '''(?i)(atlassian[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{24})['\"]''' +secretGroup = 3 + +[[rules]] +id = "bitbucket-client-id" +description = "Bitbucket client ID" +regex = 
'''(?i)(bitbucket[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "bitbucket-client-secret" +description = "Bitbucket client secret" +regex = '''(?i)(bitbucket[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9_\-]{64})['\"]''' +secretGroup = 3 + +[[rules]] +id = "beamer-api-token" +description = "Beamer API token" +regex = '''(?i)(beamer[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](b_[a-z0-9=_\-]{44})['\"]''' +secretGroup = 3 + +[[rules]] +id = "clojars-api-token" +description = "Clojars API token" +regex = '''(CLOJARS_)(?i)[a-z0-9]{60}''' + +[[rules]] +id = "contentful-delivery-api-token" +description = "Contentful delivery API token" +regex = '''(?i)(contentful[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{43})['\"]''' +secretGroup = 3 + +[[rules]] +id = "databricks-api-token" +description = "Databricks API token" +regex = '''dapi[a-h0-9]{32}''' + +[[rules]] +id = "discord-api-token" +description = "Discord API key" +regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{64})['\"]''' +secretGroup = 3 + +[[rules]] +id = "discord-client-id" +description = "Discord client ID" +regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9]{18})['\"]''' +secretGroup = 3 + +[[rules]] +id = "discord-client-secret" +description = "Discord client secret" +regex = '''(?i)(discord[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_\-]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "doppler-api-token" +description = "Doppler API token" +regex = '''['\"](dp\.pt\.)(?i)[a-z0-9]{43}['\"]''' + +[[rules]] +id = "dropbox-api-secret" +description = "Dropbox API secret/key" +regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{15})['\"]''' + +[[rules]] +id = "dropbox--api-key" +description = "Dropbox API secret/key" +regex = '''(?i)(dropbox[a-z0-9_ 
.\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{15})['\"]''' + +[[rules]] +id = "dropbox-short-lived-api-token" +description = "Dropbox short lived API token" +regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](sl\.[a-z0-9\-=_]{135})['\"]''' + +[[rules]] +id = "dropbox-long-lived-api-token" +description = "Dropbox long lived API token" +regex = '''(?i)(dropbox[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"][a-z0-9]{11}(AAAAAAAAAA)[a-z0-9\-_=]{43}['\"]''' + +[[rules]] +id = "duffel-api-token" +description = "Duffel API token" +regex = '''['\"]duffel_(test|live)_(?i)[a-z0-9_-]{43}['\"]''' + +[[rules]] +id = "dynatrace-api-token" +description = "Dynatrace API token" +regex = '''['\"]dt0c01\.(?i)[a-z0-9]{24}\.[a-z0-9]{64}['\"]''' + +[[rules]] +id = "easypost-api-token" +description = "EasyPost API token" +regex = '''['\"]EZAK(?i)[a-z0-9]{54}['\"]''' + +[[rules]] +id = "easypost-test-api-token" +description = "EasyPost test API token" +regex = '''['\"]EZTK(?i)[a-z0-9]{54}['\"]''' + +[[rules]] +id = "fastly-api-token" +description = "Fastly API token" +regex = '''(?i)(fastly[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9\-=_]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "finicity-client-secret" +description = "Finicity client secret" +regex = '''(?i)(finicity[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{20})['\"]''' +secretGroup = 3 + +[[rules]] +id = "finicity-api-token" +description = "Finicity API token" +regex = '''(?i)(finicity[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "flutterwave-public-key" +description = "Flutterwave public key" +regex = '''FLWPUBK_TEST-(?i)[a-h0-9]{32}-X''' + +[[rules]] +id = "flutterwave-secret-key" +description = "Flutterwave secret key" +regex = '''FLWSECK_TEST-(?i)[a-h0-9]{32}-X''' + +[[rules]] +id = "flutterwave-enc-key" +description = "Flutterwave encrypted key" +regex = 
'''FLWSECK_TEST[a-h0-9]{12}''' + +[[rules]] +id = "frameio-api-token" +description = "Frame.io API token" +regex = '''fio-u-(?i)[a-z0-9\-_=]{64}''' + +[[rules]] +id = "gocardless-api-token" +description = "GoCardless API token" +regex = '''['\"]live_(?i)[a-z0-9\-_=]{40}['\"]''' + +[[rules]] +id = "grafana-api-token" +description = "Grafana API token" +regex = '''['\"]eyJrIjoi(?i)[a-z0-9\-_=]{72,92}['\"]''' + +[[rules]] +id = "hashicorp-tf-api-token" +description = "HashiCorp Terraform user/org API token" +regex = '''['\"](?i)[a-z0-9]{14}\.atlasv1\.[a-z0-9\-_=]{60,70}['\"]''' + +[[rules]] +id = "hubspot-api-token" +description = "HubSpot API token" +regex = '''(?i)(hubspot[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]''' +secretGroup = 3 + +[[rules]] +id = "intercom-api-token" +description = "Intercom API token" +regex = '''(?i)(intercom[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9=_]{60})['\"]''' +secretGroup = 3 + +[[rules]] +id = "intercom-client-secret" +description = "Intercom client secret/ID" +regex = '''(?i)(intercom[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]''' +secretGroup = 3 + +[[rules]] +id = "ionic-api-token" +description = "Ionic API token" +regex = '''(?i)(ionic[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](ion_[a-z0-9]{42})['\"]''' + +[[rules]] +id = "linear-api-token" +description = "Linear API token" +regex = '''lin_api_(?i)[a-z0-9]{40}''' + +[[rules]] +id = "linear-client-secret" +description = "Linear client secret/ID" +regex = '''(?i)(linear[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "lob-api-key" +description = "Lob API Key" +regex = '''(?i)(lob[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]((live|test)_[a-f0-9]{35})['\"]''' +secretGroup = 3 + +[[rules]] +id = "lob-pub-api-key" +description = 
"Lob Publishable API Key" +regex = '''(?i)(lob[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]((test|live)_pub_[a-f0-9]{31})['\"]''' +secretGroup = 3 + +[[rules]] +id = "mailchimp-api-key" +description = "Mailchimp API key" +regex = '''(?i)(mailchimp[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-f0-9]{32}-us20)['\"]''' +secretGroup = 3 + +[[rules]] +id = "mailgun-private-api-token" +description = "Mailgun private API token" +regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](key-[a-f0-9]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "mailgun-pub-key" +description = "Mailgun public validation key" +regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"](pubkey-[a-f0-9]{32})['\"]''' +secretGroup = 3 + +[[rules]] +id = "mailgun-signing-key" +description = "Mailgun webhook signing key" +regex = '''(?i)(mailgun[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{32}-[a-h0-9]{8}-[a-h0-9]{8})['\"]''' +secretGroup = 3 + +[[rules]] +id = "mapbox-api-token" +description = "Mapbox API token" +regex = '''(?i)(pk\.[a-z0-9]{60}\.[a-z0-9]{22})''' + +[[rules]] +id = "messagebird-api-token" +description = "MessageBird API token" +regex = '''(?i)(messagebird[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{25})['\"]''' +secretGroup = 3 + +[[rules]] +id = "messagebird-client-id" +description = "MessageBird API client ID" +regex = '''(?i)(messagebird[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-h0-9]{8}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{4}-[a-h0-9]{12})['\"]''' +secretGroup = 3 + +[[rules]] +id = "new-relic-user-api-key" +description = "New Relic user API Key" +regex = '''['\"](NRAK-[A-Z0-9]{27})['\"]''' + +[[rules]] +id = "new-relic-user-api-id" +description = "New Relic user API ID" +regex = '''(?i)(newrelic[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([A-Z0-9]{64})['\"]''' +secretGroup = 3 + +[[rules]] +id = "new-relic-browser-api-token" +description = "New Relic ingest 
browser API token" +regex = '''['\"](NRJS-[a-f0-9]{19})['\"]''' + +[[rules]] +id = "npm-access-token" +description = "npm access token" +regex = '''['\"](npm_(?i)[a-z0-9]{36})['\"]''' + +[[rules]] +id = "planetscale-password" +description = "PlanetScale password" +regex = '''pscale_pw_(?i)[a-z0-9\-_\.]{43}''' + +[[rules]] +id = "planetscale-api-token" +description = "PlanetScale API token" +regex = '''pscale_tkn_(?i)[a-z0-9\-_\.]{43}''' + +[[rules]] +id = "postman-api-token" +description = "Postman API token" +regex = '''PMAK-(?i)[a-f0-9]{24}\-[a-f0-9]{34}''' + +[[rules]] +id = "pulumi-api-token" +description = "Pulumi API token" +regex = '''pul-[a-f0-9]{40}''' + +[[rules]] +id = "rubygems-api-token" +description = "Rubygem API token" +regex = '''rubygems_[a-f0-9]{48}''' + +[[rules]] +id = "sendgrid-api-token" +description = "SendGrid API token" +regex = '''SG\.(?i)[a-z0-9_\-\.]{66}''' + +[[rules]] +id = "sendinblue-api-token" +description = "Sendinblue API token" +regex = '''xkeysib-[a-f0-9]{64}\-(?i)[a-z0-9]{16}''' + +[[rules]] +id = "shippo-api-token" +description = "Shippo API token" +regex = '''shippo_(live|test)_[a-f0-9]{40}''' + +[[rules]] +id = "linkedin-client-secret" +description = "LinkedIn Client secret" +regex = '''(?i)(linkedin[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z]{16})['\"]''' +secretGroup = 3 + +[[rules]] +id = "linkedin-client-id" +description = "LinkedIn Client ID" +regex = '''(?i)(linkedin[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{14})['\"]''' +secretGroup = 3 + +[[rules]] +id = "twitch-api-token" +description = "Twitch API token" +regex = '''(?i)(twitch[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([a-z0-9]{30})['\"]''' +secretGroup = 3 + +[[rules]] +id = "typeform-api-token" +description = "Typeform API token" +regex = '''(?i)(typeform[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}(tfp_[a-z0-9\-_\.=]{59})''' +secretGroup = 3 + +[[rules]] +id = "generic-api-key" +description = "Generic API 
Key" +regex = '''(?i)((key|api[^Version]|token|secret|password)[a-z0-9_ .\-,]{0,25})(=|>|:=|\|\|:|<=|=>|:).{0,5}['\"]([0-9a-zA-Z\-_=]{8,64})['\"]''' +entropy = 3.7 +secretGroup = 4 + + +[allowlist] +description = "global allow lists" +regexes = ['''219-09-9999''', '''078-05-1120''', '''(9[0-9]{2}|666)-\d{2}-\d{4}'''] +paths = [ + '''gitleaks.toml''', + '''(.*?)(jpg|gif|doc|pdf|bin|svg|socket)$''', + '''(go.mod|go.sum)$''', + + '''salt/nginx/files/enterprise-attack.json''' +] diff --git a/.github/workflows/leaktest.yml b/.github/workflows/leaktest.yml index 590e220d0..c2f7f8010 100644 --- a/.github/workflows/leaktest.yml +++ b/.github/workflows/leaktest.yml @@ -13,3 +13,5 @@ jobs: - name: Gitleaks uses: zricethezav/gitleaks-action@master + with: + config-path: .github/.gitleaks.toml diff --git a/HOTFIX b/HOTFIX index 644f9e9ee..d3f5a12fa 100644 --- a/HOTFIX +++ b/HOTFIX @@ -1 +1 @@ -04012022 04052022 04072022 + diff --git a/README.md b/README.md index 0dff3fba4..0662e05be 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -## Security Onion 2.3.110 +## Security Onion 2.3.120 -Security Onion 2.3.110 is here! +Security Onion 2.3.120 is here! 
## Screenshots diff --git a/VERIFY_ISO.md b/VERIFY_ISO.md index c8e0158f9..ce56cd48c 100644 --- a/VERIFY_ISO.md +++ b/VERIFY_ISO.md @@ -1,18 +1,18 @@ -### 2.3.110-20220407 ISO image built on 2022/04/07 +### 2.3.120-20220425 ISO image built on 2022/04/25 ### Download and Verify -2.3.110-20220407 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.3.110-20220407.iso +2.3.120-20220425 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.3.120-20220425.iso -MD5: 928D589709731EFE9942CA134A6F4C6B -SHA1: CA588A684586CC0D5BDE5E0E41C935FFB939B6C7 -SHA256: CBF8743838AF2C7323E629FB6B28D5DD00AE6658B0E29E4D0916411D2D526BD2 +MD5: C99729E452B064C471BEF04532F28556 +SHA1: 60BF07D5347C24568C7B793BFA9792E98479CFBF +SHA256: CD17D0D7CABE21D45FA45E1CF91C5F24EB9608C79FF88480134E5592AFDD696E Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.110-20220407.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.120-20220425.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/master/KEYS @@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/ma Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.110-20220407.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/master/sigs/securityonion-2.3.120-20220425.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.3.110-20220407.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.3.120-20220425.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.3.110-20220407.iso.sig securityonion-2.3.110-20220407.iso +gpg --verify 
securityonion-2.3.120-20220425.iso.sig securityonion-2.3.120-20220425.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Thu 07 Apr 2022 03:30:03 PM EDT using RSA key ID FE507013 +gpg: Signature made Mon 25 Apr 2022 08:20:40 AM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. diff --git a/VERSION b/VERSION index 18161cf24..fb75c1af3 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.3.110 +2.3.120 diff --git a/pillar/elasticsearch/index_templates.sls b/pillar/elasticsearch/index_templates.sls new file mode 100644 index 000000000..a02a1818c --- /dev/null +++ b/pillar/elasticsearch/index_templates.sls @@ -0,0 +1,2 @@ +elasticsearch: + index_settings: diff --git a/pillar/logstash/search.sls b/pillar/logstash/search.sls index 917657e1f..ebe133056 100644 --- a/pillar/logstash/search.sls +++ b/pillar/logstash/search.sls @@ -13,4 +13,5 @@ logstash: - so/9600_output_ossec.conf.jinja - so/9700_output_strelka.conf.jinja - so/9800_output_logscan.conf.jinja + - so/9801_output_rita.conf.jinja - so/9900_output_endgame.conf.jinja diff --git a/pillar/top.sls b/pillar/top.sls index 8ab666d0d..1cf3bdc8a 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -15,12 +15,12 @@ base: - logstash - logstash.manager - logstash.search - - elasticsearch.search + - elasticsearch.index_templates '*_manager': - logstash - logstash.manager - - elasticsearch.manager + - elasticsearch.index_templates '*_manager or *_managersearch': - match: compound @@ -46,7 +46,7 @@ base: - zeeklogs - secrets - healthcheck.eval - - elasticsearch.eval + - elasticsearch.index_templates {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %} - elasticsearch.auth {% endif %} @@ -60,7 +60,7 @@ base: - logstash - logstash.manager - 
logstash.search - - elasticsearch.search + - elasticsearch.index_templates {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %} - elasticsearch.auth {% endif %} @@ -106,7 +106,7 @@ base: '*_searchnode': - logstash - logstash.search - - elasticsearch.search + - elasticsearch.index_templates - elasticsearch.auth - global - minions.{{ grains.id }} @@ -122,7 +122,7 @@ base: '*_import': - zeeklogs - secrets - - elasticsearch.eval + - elasticsearch.index_templates {% if salt['file.file_exists']('/opt/so/saltstack/local/pillar/elasticsearch/auth.sls') %} - elasticsearch.auth {% endif %} @@ -131,3 +131,6 @@ base: {% endif %} - global - minions.{{ grains.id }} + + '*_workstation': + - minions.{{ grains.id }} diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 36fd86321..3dbc6d24a 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -1,6 +1,5 @@ {% set ZEEKVER = salt['pillar.get']('global:mdengine', '') %} {% set WAZUH = salt['pillar.get']('global:wazuh', '0') %} -{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %} {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %} {% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %} {% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %} @@ -218,6 +217,8 @@ 'schedule', 'docker_clean' ], + 'so-workstation': [ + ], }, grain='role') %} {% if FILEBEAT and grains.role in ['so-helixsensor', 'so-eval', 'so-manager', 'so-standalone', 'so-node', 'so-managersearch', 'so-heavynode', 'so-import', 'so-receiver'] %} @@ -273,10 +274,6 @@ {% do allowed_states.append('elastalert') %} {% endif %} - {% if (THEHIVE != 0) and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %} - {% do allowed_states.append('thehive') %} - {% endif %} - {% if (PLAYBOOK !=0) and grains.role in ['so-eval', 'so-manager', 'so-standalone', 'so-managersearch'] %} {% do allowed_states.append('playbook') %} {% 
endif %} diff --git a/salt/common/init.sls b/salt/common/init.sls index d1acca878..0eaf5e77e 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -300,8 +300,17 @@ sostatus_log: - month: '*' - dayweek: '*' - {% if role in ['eval', 'manager', 'managersearch', 'standalone'] %} +# Install cron job to determine size of influxdb for telegraf +'du -s -k /nsm/influxdb | cut -f1 > /opt/so/log/telegraf/influxdb_size.log 2>&1': + cron.present: + - user: root + - minute: '*/1' + - hour: '*' + - daymonth: '*' + - month: '*' + - dayweek: '*' + # Lock permissions on the backup directory backupdir: file.directory: diff --git a/salt/common/tools/sbin/so-analyst-install b/salt/common/tools/sbin/so-analyst-install index 6917725fc..12b940897 100755 --- a/salt/common/tools/sbin/so-analyst-install +++ b/salt/common/tools/sbin/so-analyst-install @@ -15,295 +15,86 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -if [ "$(id -u)" -ne 0 ]; then - echo "This script must be run using sudo!" - exit 1 -fi +doc_workstation_url="https://docs.securityonion.net/en/2.3/analyst-vm.html" +{# we only want the script to install the workstation if it is CentOS -#} +{% if grains.os == 'CentOS' -%} +{# if this is a manager -#} +{% if grains.master == grains.id.split('_')|first -%} -INSTALL_LOG=/root/so-analyst-install.log -exec &> >(tee -a "$INSTALL_LOG") +source /usr/sbin/so-common +pillar_file="/opt/so/saltstack/local/pillar/minions/{{grains.id}}.sls" -log() { - msg=$1 - level=${2:-I} - now=$(TZ=GMT date +"%Y-%m-%dT%H:%M:%SZ") - echo -e "$now | $level | $msg" >> "$INSTALL_LOG" 2>&1 -} +if [ -f "$pillar_file" ]; then + if ! 
grep -q "^workstation:$" "$pillar_file"; then -error() { - log "$1" "E" -} - -info() { - log "$1" "I" -} - -title() { - echo -e "\n-----------------------------\n $1\n-----------------------------\n" >> "$INSTALL_LOG" 2>&1 -} - -logCmd() { - cmd=$1 - info "Executing command: $cmd" - $cmd >> "$INSTALL_LOG" 2>&1 -} - -analyze_system() { - title "System Characteristics" - logCmd "uptime" - logCmd "uname -a" - logCmd "free -h" - logCmd "lscpu" - logCmd "df -h" - logCmd "ip a" -} - -analyze_system - -OS=$(grep PRETTY_NAME /etc/os-release | grep 'CentOS Linux 7') -if [ $? -ne 0 ]; then - echo "This is an unsupported OS. Please use CentOS 7 to install the analyst node." - exit 1 -fi - -if [[ "$manufacturer" == "Security Onion Solutions" && "$family" == "Automated" ]]; then - INSTALL=yes - CURLCONTINUE=no -else - INSTALL='' - CURLCONTINUE='' -fi - -FIRSTPASS=yes -while [[ $INSTALL != "yes" ]] && [[ $INSTALL != "no" ]]; do - if [[ "$FIRSTPASS" == "yes" ]]; then - clear - echo "###########################################" - echo "## ** W A R N I N G ** ##" - echo "## _______________________________ ##" - echo "## ##" - echo "## Installing the Security Onion ##" - echo "## analyst node on this device will ##" - echo "## make permanent changes to ##" - echo "## the system. ##" - echo "## ##" - echo "###########################################" - echo "Do you wish to continue? (Type the entire word 'yes' to proceed or 'no' to exit)" - FIRSTPASS=no - else - echo "Please type 'yes' to continue or 'no' to exit." - fi - read INSTALL -done - -if [[ $INSTALL == "no" ]]; then - echo "Exiting analyst node installation." - exit 0 -fi - -echo "Testing for internet connection with curl https://securityonionsolutions.com/" -CANCURL=$(curl -sI https://securityonionsolutions.com/ | grep "200 OK") - if [ $? 
-ne 0 ]; then FIRSTPASS=yes - while [[ $CURLCONTINUE != "yes" ]] && [[ $CURLCONTINUE != "no" ]]; do + while [[ $INSTALL != "yes" ]] && [[ $INSTALL != "no" ]]; do if [[ "$FIRSTPASS" == "yes" ]]; then - echo "We could not access https://securityonionsolutions.com/." - echo "Since packages are downloaded from the internet, internet access is required." - echo "If you would like to ignore this warning and continue anyway, please type 'yes'." - echo "Otherwise, type 'no' to exit." + echo "###########################################" + echo "## ** W A R N I N G ** ##" + echo "## _______________________________ ##" + echo "## ##" + echo "## Installing the Security Onion ##" + echo "## analyst node on this device will ##" + echo "## make permanent changes to ##" + echo "## the system. ##" + echo "## A system reboot will be required ##" + echo "## to complete the install. ##" + echo "## ##" + echo "###########################################" + echo "Do you wish to continue? (Type the entire word 'yes' to proceed or 'no' to exit)" FIRSTPASS=no else echo "Please type 'yes' to continue or 'no' to exit." - fi - read CURLCONTINUE + fi + read INSTALL done - if [[ "$CURLCONTINUE" == "no" ]]; then + + if [[ $INSTALL == "no" ]]; then echo "Exiting analyst node installation." exit 0 fi - else - echo "We were able to curl https://securityonionsolutions.com/." - sleep 3 + + # Add workstation pillar to the minion's pillar file + printf '%s\n'\ + "workstation:"\ + " gui:"\ + " enabled: true"\ + "" >> "$pillar_file" + echo "Applying the workstation state. This could take some time since there are many packages that need to be installed." + if salt-call state.apply workstation -linfo queue=True; then # make sure the state ran successfully + echo "" + echo "Analyst workstation has been installed!" + echo "Press ENTER to reboot or Ctrl-C to cancel." + read pause + + reboot; + else + echo "There was an issue applying the workstation state. 
Please review the log above or at /opt/so/logs/salt/minion." + fi + else # workstation is already added + echo "The workstation pillar already exists in $pillar_file." + echo "To enable/disable the gui, set 'workstation:gui:enabled' to true or false in $pillar_file." + echo "Additional documentation can be found at $doc_workstation_url." fi - -# Install a GUI text editor -yum -y install gedit - -# Install misc utils -yum -y install wget curl unzip epel-release yum-plugin-versionlock; - -# Install xWindows -yum -y groupinstall "X Window System"; -yum -y install gnome-classic-session gnome-terminal nautilus-open-terminal control-center liberation-mono-fonts; -unlink /etc/systemd/system/default.target; -ln -sf /lib/systemd/system/graphical.target /etc/systemd/system/default.target; -yum -y install file-roller - -# Install Mono - prereq for NetworkMiner -yum -y install mono-core mono-basic mono-winforms expect - -# Install NetworkMiner -yum -y install libcanberra-gtk2; -wget https://www.netresec.com/?download=NetworkMiner -O /tmp/nm.zip; -mkdir -p /opt/networkminer/ -unzip /tmp/nm.zip -d /opt/networkminer/; -rm /tmp/nm.zip; -mv /opt/networkminer/NetworkMiner_*/* /opt/networkminer/ -chmod +x /opt/networkminer/NetworkMiner.exe; -chmod -R go+w /opt/networkminer/AssembledFiles/; -chmod -R go+w /opt/networkminer/Captures/; -# Create networkminer shim -cat << EOF >> /bin/networkminer -#!/bin/bash -/bin/mono /opt/networkminer/NetworkMiner.exe --noupdatecheck "\$@" -EOF -chmod +x /bin/networkminer -# Convert networkminer ico file to png format -yum -y install ImageMagick -convert /opt/networkminer/networkminericon.ico /opt/networkminer/networkminericon.png -# Create menu entry -cat << EOF >> /usr/share/applications/networkminer.desktop -[Desktop Entry] -Name=NetworkMiner -Comment=NetworkMiner -Encoding=UTF-8 -Exec=/bin/networkminer %f -Icon=/opt/networkminer/networkminericon-4.png -StartupNotify=true -Terminal=false -X-MultipleArgs=false -Type=Application 
-MimeType=application/x-pcap; -Categories=Network; -EOF - -# Set default monospace font to Liberation -cat << EOF >> /etc/fonts/local.conf - - - monospace - - - Liberation Mono - - -EOF - -# Install Wireshark for Gnome -yum -y install wireshark-gnome; - -# Install dnsiff -yum -y install dsniff; - -# Install hping3 -yum -y install hping3; - -# Install netsed -yum -y install netsed; - -# Install ngrep -yum -y install ngrep; - -# Install scapy -yum -y install python36-scapy; - -# Install ssldump -yum -y install ssldump; - -# Install tcpdump -yum -y install tcpdump; - -# Install tcpflow -yum -y install tcpflow; - -# Install tcpxtract -yum -y install tcpxtract; - -# Install whois -yum -y install whois; - -# Install foremost -yum -y install https://forensics.cert.org/centos/cert/7/x86_64//foremost-1.5.7-13.1.el7.x86_64.rpm; - -# Install chromium -yum -y install chromium; - -# Install tcpstat -yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-tcpstat-1.5.0/securityonion-tcpstat-1.5.0.rpm; - -# Install tcptrace -yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-tcptrace-6.6.7/securityonion-tcptrace-6.6.7.rpm; - -# Install sslsplit -yum -y install libevent; -yum -y install sslsplit; - -# Install Bit-Twist -yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-bittwist-2.0.0/securityonion-bittwist-2.0.0.rpm; - -# Install chaosreader -yum -y install perl-IO-Compress perl-Net-DNS; -yum -y install https://github.com/Security-Onion-Solutions/securityonion-docker-rpm/releases/download/securityonion-chaosreader-0.95.10/securityonion-chaosreader-0.95.10.rpm; -chmod +x /bin/chaosreader; - -if [ -f ../../files/analyst/README ]; then - cp ../../files/analyst/README /; - cp ../../files/analyst/so-wallpaper.jpg /usr/share/backgrounds/; - cp ../../files/analyst/so-lockscreen.jpg 
/usr/share/backgrounds/; - cp ../../files/analyst/so-login-logo-dark.svg /usr/share/pixmaps/; -else - cp /opt/so/saltstack/default/salt/common/files/analyst/README /; - cp /opt/so/saltstack/default/salt/common/files/analyst/so-wallpaper.jpg /usr/share/backgrounds/; - cp /opt/so/saltstack/default/salt/common/files/analyst/so-lockscreen.jpg /usr/share/backgrounds/; - cp /opt/so/saltstack/default/salt/common/files/analyst/so-login-logo-dark.svg /usr/share/pixmaps/; +else # if the pillar file doesn't exist + echo "Could not find $pillar_file and add the workstation pillar." fi -# Set background wallpaper -cat << EOF >> /etc/dconf/db/local.d/00-background -# Specify the dconf path -[org/gnome/desktop/background] +{#- if this is not a manager #} +{% else -%} -# Specify the path to the desktop background image file -picture-uri='file:///usr/share/backgrounds/so-wallpaper.jpg' -# Specify one of the rendering options for the background image: -# 'none', 'wallpaper', 'centered', 'scaled', 'stretched', 'zoom', 'spanned' -picture-options='zoom' -# Specify the left or top color when drawing gradients or the solid color -primary-color='000000' -# Specify the right or bottom color when drawing gradients -secondary-color='FFFFFF' -EOF +echo "Since this is not a manager, the pillar values to enable analyst workstation must be set manually. Please view the documentation at $doc_workstation_url." 
-# Set lock screen -cat << EOF >> /etc/dconf/db/local.d/00-screensaver -[org/gnome/desktop/session] -idle-delay=uint32 180 +{#- endif if this is a manager #} +{% endif -%} -[org/gnome/desktop/screensaver] -lock-enabled=true -lock-delay=uint32 120 -picture-options='zoom' -picture-uri='file:///usr/share/backgrounds/so-lockscreen.jpg' -EOF +{#- if not CentOS #} +{%- else %} -cat << EOF >> /etc/dconf/db/local.d/locks/screensaver -/org/gnome/desktop/session/idle-delay -/org/gnome/desktop/screensaver/lock-enabled -/org/gnome/desktop/screensaver/lock-delay -EOF +echo "The Analyst Workstation can only be installed on CentOS. Please view the documentation at $doc_workstation_url." -# Do not show the user list at login screen -cat << EOF >> /etc/dconf/db/local.d/00-login-screen -[org/gnome/login-screen] -logo='/usr/share/pixmaps/so-login-logo-dark.svg' -disable-user-list=true -EOF +{#- endif grains.os == CentOS #} +{% endif -%} -dconf update; - -echo -echo "Analyst workstation has been installed!" -echo "Press ENTER to reboot or Ctrl-C to cancel." -read pause - -reboot; +exit 0 diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index a7677a754..7b5f29c00 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -120,6 +120,30 @@ check_elastic_license() { fi } +check_salt_master_status() { + local timeout=$1 + echo "Checking if we can talk to the salt master" + salt-call state.show_top concurrent=true + + return +} + +check_salt_minion_status() { + local timeout=$1 + echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 + salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 + local status=$? 
+ if [ $status -gt 0 ]; then + echo " Minion did not respond" >> "$setup_log" 2>&1 + else + echo " Received job response from salt minion" >> "$setup_log" 2>&1 + fi + + return $status +} + + + copy_new_files() { # Copy new files over to the salt dir cd $UPDATE_DIR @@ -367,6 +391,7 @@ run_check_net_err() { exit $exit_code fi } + set_cron_service_name() { if [[ "$OS" == "centos" ]]; then cron_service_name="crond" diff --git a/salt/common/tools/sbin/so-cortex-restart b/salt/common/tools/sbin/so-cortex-restart index 20f978804..3ebf42430 100755 --- a/salt/common/tools/sbin/so-cortex-restart +++ b/salt/common/tools/sbin/so-cortex-restart @@ -17,5 +17,4 @@ . /usr/sbin/so-common -/usr/sbin/so-stop cortex $1 -/usr/sbin/so-start thehive $1 +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-cortex-start b/salt/common/tools/sbin/so-cortex-start index bd651b54a..787393583 100755 --- a/salt/common/tools/sbin/so-cortex-start +++ b/salt/common/tools/sbin/so-cortex-start @@ -17,4 +17,4 @@ . /usr/sbin/so-common -/usr/sbin/so-start thehive $1 +echo "TheHive and its components are no longer part of Security Onion" diff --git a/salt/common/tools/sbin/so-cortex-stop b/salt/common/tools/sbin/so-cortex-stop index 8ffd10136..73745a1fc 100755 --- a/salt/common/tools/sbin/so-cortex-stop +++ b/salt/common/tools/sbin/so-cortex-stop @@ -17,4 +17,4 @@ . /usr/sbin/so-common -/usr/sbin/so-stop cortex $1 +echo "TheHive and its components are no longer part of Security Onion" diff --git a/salt/common/tools/sbin/so-cortex-user-add b/salt/common/tools/sbin/so-cortex-user-add index df7f6b0de..3ebf42430 100755 --- a/salt/common/tools/sbin/so-cortex-user-add +++ b/salt/common/tools/sbin/so-cortex-user-add @@ -17,38 +17,4 @@ . /usr/sbin/so-common -usage() { - echo "Usage: $0 " - echo "" - echo "Adds a new user to Cortex. The new password will be read from STDIN." 
- exit 1 -} - -if [ $# -ne 1 ]; then - usage -fi - -USER=$1 - -CORTEX_KEY=$(lookup_pillar cortexorguserkey) -CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api" -CORTEX_ORG_NAME=$(lookup_pillar cortexorgname) -CORTEX_USER=$USER - -# Read password for new user from stdin -test -t 0 -if [[ $? == 0 ]]; then - echo "Enter new password:" -fi -read -rs CORTEX_PASS - -# Create new user in Cortex -resp=$(curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_USER\",\"password\" : \"$CORTEX_PASS\" }") -if [[ "$resp" =~ \"status\":\"Ok\" ]]; then - echo "Successfully added user to Cortex." -else - echo "Unable to add user to Cortex; user might already exist." - echo $resp - exit 2 -fi - +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-cortex-user-enable b/salt/common/tools/sbin/so-cortex-user-enable index c20e8b3ad..3ebf42430 100755 --- a/salt/common/tools/sbin/so-cortex-user-enable +++ b/salt/common/tools/sbin/so-cortex-user-enable @@ -17,41 +17,4 @@ . /usr/sbin/so-common -usage() { - echo "Usage: $0 " - echo "" - echo "Enables or disables a user in Cortex." - exit 1 -} - -if [ $# -ne 2 ]; then - usage -fi - -USER=$1 - -CORTEX_KEY=$(lookup_pillar cortexorguserkey) -CORTEX_API_URL="$(lookup_pillar url_base)/cortex/api" -CORTEX_USER=$USER - -case "${2^^}" in - FALSE | NO | 0) - CORTEX_STATUS=Locked - ;; - TRUE | YES | 1) - CORTEX_STATUS=Ok - ;; - *) - usage - ;; -esac - -resp=$(curl -sk -XPATCH -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "https://$CORTEX_API_URL/user/${CORTEX_USER}" -d "{\"status\":\"${CORTEX_STATUS}\" }") -if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then - echo "Successfully updated user in Cortex." 
-else - echo "Failed to update user in Cortex." - echo $resp - exit 2 -fi - +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-elasticsearch-indices-rw b/salt/common/tools/sbin/so-elasticsearch-indices-rw index 166c4b284..5aa24f91a 100755 --- a/salt/common/tools/sbin/so-elasticsearch-indices-rw +++ b/salt/common/tools/sbin/so-elasticsearch-indices-rw @@ -17,9 +17,7 @@ # along with this program. If not, see . IP={{ salt['grains.get']('ip_interfaces').get(salt['pillar.get']('sensor:mainint', salt['pillar.get']('manager:mainint', salt['pillar.get']('elasticsearch:mainint', salt['pillar.get']('host:mainint')))))[0] }} ESPORT=9200 -THEHIVEESPORT=9400 echo "Removing read only attributes for indices..." echo {{ ELASTICCURL }} -s -k -XPUT -H "Content-Type: application/json" -L https://$IP:9200/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. Please ensure Elasticsearch is running.";fi; -{{ ELASTICCURL }} -XPUT -H "Content-Type: application/json" -L http://$IP:9400/_all/_settings -d '{"index.blocks.read_only_allow_delete": null}' 2>&1 | if grep -q ack; then echo "Index settings updated..."; else echo "There was any issue updating the read-only attribute. 
Please ensure Elasticsearch is running.";fi; diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 140d4c63b..00d4233d0 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -75,9 +75,6 @@ container_list() { "so-strelka-manager" "so-suricata" "so-telegraf" - "so-thehive" - "so-thehive-cortex" - "so-thehive-es" "so-wazuh" "so-zeek" ) diff --git a/salt/common/tools/sbin/so-playbook-sync b/salt/common/tools/sbin/so-playbook-sync index c63ee38ad..c2d20766e 100755 --- a/salt/common/tools/sbin/so-playbook-sync +++ b/salt/common/tools/sbin/so-playbook-sync @@ -18,7 +18,7 @@ . /usr/sbin/so-common # Check to see if we are already running -IS_RUNNING=$(ps aux | pgrep -f "so-playbook-sync" | wc -l) -[ "$IS_RUNNING" -gt 3 ] && echo "$(date) - Multiple Playbook Sync processes already running...exiting." && exit 0 +NUM_RUNNING=$(pgrep -cf "/bin/bash /usr/sbin/so-playbook-sync") +[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING Playbook sync processes running...exiting." && exit 0 docker exec so-soctopus python3 playbook_play-sync.py diff --git a/salt/common/tools/sbin/so-restart b/salt/common/tools/sbin/so-restart index ecb58c301..dda4baf57 100755 --- a/salt/common/tools/sbin/so-restart +++ b/salt/common/tools/sbin/so-restart @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -# Usage: so-restart filebeat | kibana | playbook | thehive +# Usage: so-restart filebeat | kibana | playbook . 
/usr/sbin/so-common @@ -31,7 +31,6 @@ if [ $# -ge 1 ]; then fi case $1 in - "cortex") docker stop so-thehive-cortex so-thehive && docker rm so-thehive-cortex so-thehive && salt-call state.apply hive queue=True;; "steno") docker stop so-steno && docker rm so-steno && salt-call state.apply pcap queue=True;; *) docker stop so-$1 ; docker rm so-$1 ; salt-call state.apply $1 queue=True;; esac diff --git a/salt/common/tools/sbin/so-saltstack-update b/salt/common/tools/sbin/so-saltstack-update index e68af9a7d..81b00ace5 100755 --- a/salt/common/tools/sbin/so-saltstack-update +++ b/salt/common/tools/sbin/so-saltstack-update @@ -32,11 +32,17 @@ copy_new_files() { # Copy new files over to the salt dir cd /tmp/sogh/securityonion git checkout $BRANCH + VERSION=$(cat VERSION) + # We need to overwrite if there is a repo file + if [ -d /opt/so/repo ]; then + tar -czf /opt/so/repo/"$VERSION".tar.gz -C "$(pwd)/.." . + fi rsync -a salt $default_salt_dir/ rsync -a pillar $default_salt_dir/ chown -R socore:socore $default_salt_dir/salt chown -R socore:socore $default_salt_dir/pillar chmod 755 $default_salt_dir/pillar/firewall/addfirewall.sh + rm -rf /tmp/sogh } diff --git a/salt/common/tools/sbin/so-sensor-clean b/salt/common/tools/sbin/so-sensor-clean index 3b871ad80..624ff8106 100755 --- a/salt/common/tools/sbin/so-sensor-clean +++ b/salt/common/tools/sbin/so-sensor-clean @@ -115,8 +115,8 @@ clean() { } # Check to see if we are already running -IS_RUNNING=$(ps aux | pgrep -f "so-sensor-clean" | wc -l) -[ "$IS_RUNNING" -gt 3 ] && echo "$(date) - $IS_RUNNING sensor clean script processes running...exiting." >>$LOG && exit 0 +NUM_RUNNING=$(pgrep -cf "/bin/bash /usr/sbin/so-sensor-clean") +[ "$NUM_RUNNING" -gt 1 ] && echo "$(date) - $NUM_RUNNING sensor clean script processes running...exiting." 
>>$LOG && exit 0 if [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; then while [ "$CUR_USAGE" -gt "$CRIT_DISK_USAGE" ]; do diff --git a/salt/common/tools/sbin/so-start b/salt/common/tools/sbin/so-start index 7859e8820..a592388d4 100755 --- a/salt/common/tools/sbin/so-start +++ b/salt/common/tools/sbin/so-start @@ -15,7 +15,7 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -# Usage: so-start all | filebeat | kibana | playbook | thehive +# Usage: so-start all | filebeat | kibana | playbook . /usr/sbin/so-common diff --git a/salt/common/tools/sbin/so-thehive-es-restart b/salt/common/tools/sbin/so-thehive-es-restart index 7c506246e..73745a1fc 100755 --- a/salt/common/tools/sbin/so-thehive-es-restart +++ b/salt/common/tools/sbin/so-thehive-es-restart @@ -17,5 +17,4 @@ . /usr/sbin/so-common -/usr/sbin/so-stop thehive-es $1 -/usr/sbin/so-start thehive $1 +echo "TheHive and its components are no longer part of Security Onion" diff --git a/salt/common/tools/sbin/so-thehive-es-start b/salt/common/tools/sbin/so-thehive-es-start index bd651b54a..97b575a40 100755 --- a/salt/common/tools/sbin/so-thehive-es-start +++ b/salt/common/tools/sbin/so-thehive-es-start @@ -17,4 +17,4 @@ . /usr/sbin/so-common -/usr/sbin/so-start thehive $1 +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-thehive-es-stop b/salt/common/tools/sbin/so-thehive-es-stop index fa60bd7a5..3ebf42430 100755 --- a/salt/common/tools/sbin/so-thehive-es-stop +++ b/salt/common/tools/sbin/so-thehive-es-stop @@ -17,4 +17,4 @@ . 
/usr/sbin/so-common -/usr/sbin/so-stop thehive-es $1 +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-thehive-restart b/salt/common/tools/sbin/so-thehive-restart index e123d3d9e..3ebf42430 100755 --- a/salt/common/tools/sbin/so-thehive-restart +++ b/salt/common/tools/sbin/so-thehive-restart @@ -17,4 +17,4 @@ . /usr/sbin/so-common -/usr/sbin/so-restart thehive $1 +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-thehive-start b/salt/common/tools/sbin/so-thehive-start index bd651b54a..97b575a40 100755 --- a/salt/common/tools/sbin/so-thehive-start +++ b/salt/common/tools/sbin/so-thehive-start @@ -17,4 +17,4 @@ . /usr/sbin/so-common -/usr/sbin/so-start thehive $1 +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-thehive-stop b/salt/common/tools/sbin/so-thehive-stop index fc482c5bd..3ebf42430 100755 --- a/salt/common/tools/sbin/so-thehive-stop +++ b/salt/common/tools/sbin/so-thehive-stop @@ -17,4 +17,4 @@ . /usr/sbin/so-common -/usr/sbin/so-stop thehive $1 +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-thehive-user-add b/salt/common/tools/sbin/so-thehive-user-add index 45851a89d..3ebf42430 100755 --- a/salt/common/tools/sbin/so-thehive-user-add +++ b/salt/common/tools/sbin/so-thehive-user-add @@ -17,38 +17,4 @@ . /usr/sbin/so-common -usage() { - echo "Usage: $0 " - echo "" - echo "Adds a new user to TheHive. The new password will be read from STDIN." - exit 1 -} - -if [ $# -ne 1 ]; then - usage -fi - -USER=$1 - -THEHIVE_KEY=$(lookup_pillar hivekey) -THEHVIE_API_URL="$(lookup_pillar url_base)/thehive/api" -THEHIVE_USER=$USER - -# Read password for new user from stdin -test -t 0 -if [[ $? 
== 0 ]]; then - echo "Enter new password:" -fi -read -rs THEHIVE_PASS - -check_password_and_exit "$THEHIVE_PASS" - -# Create new user in TheHive -resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASS\"}") -if [[ "$resp" =~ \"status\":\"Ok\" ]]; then - echo "Successfully added user to TheHive" -else - echo "Unable to add user to TheHive; user might already exist" - echo $resp - exit 2 -fi +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-thehive-user-enable b/salt/common/tools/sbin/so-thehive-user-enable index 2102f83ec..3ebf42430 100755 --- a/salt/common/tools/sbin/so-thehive-user-enable +++ b/salt/common/tools/sbin/so-thehive-user-enable @@ -17,41 +17,4 @@ . /usr/sbin/so-common -usage() { - echo "Usage: $0 " - echo "" - echo "Enables or disables a user in TheHive." 
- exit 1 -} - -if [ $# -ne 2 ]; then - usage -fi - -USER=$1 - -THEHIVE_KEY=$(lookup_pillar hivekey) -THEHVIE_API_URL="$(lookup_pillar url_base)/thehive/api" -THEHIVE_USER=$USER - -case "${2^^}" in - FALSE | NO | 0) - THEHIVE_STATUS=Locked - ;; - TRUE | YES | 1) - THEHIVE_STATUS=Ok - ;; - *) - usage - ;; -esac - -resp=$(curl -sk -XPATCH -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user/${THEHIVE_USER}" -d "{\"status\":\"${THEHIVE_STATUS}\" }") -if [[ "$resp" =~ \"status\":\"Locked\" || "$resp" =~ \"status\":\"Ok\" ]]; then - echo "Successfully updated user in TheHive" -else - echo "Failed to update user in TheHive" - echo "$resp" - exit 2 -fi - +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-thehive-user-update b/salt/common/tools/sbin/so-thehive-user-update index 62d861138..3ebf42430 100755 --- a/salt/common/tools/sbin/so-thehive-user-update +++ b/salt/common/tools/sbin/so-thehive-user-update @@ -17,41 +17,4 @@ . /usr/sbin/so-common -usage() { - echo "Usage: $0 " - echo "" - echo "Update password for an existing TheHive user. The new password will be read from STDIN." - exit 1 -} - -if [ $# -ne 1 ]; then - usage -fi - -USER=$1 - -THEHIVE_KEY=$(lookup_pillar hivekey) -THEHVIE_API_URL="$(lookup_pillar url_base)/thehive/api" -THEHIVE_USER=$USER - -# Read password for new user from stdin -test -t 0 -if [[ $? == 0 ]]; then - echo "Enter new password:" -fi -read -rs THEHIVE_PASS - -if ! check_password "$THEHIVE_PASS"; then - echo "Password is invalid. Please exclude single quotes, double quotes, dollar signs, and backslashes from the password." 
- exit 2 -fi - -# Change password for user in TheHive -resp=$(curl -sk -XPOST -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -L "https://$THEHVIE_API_URL/user/${THEHIVE_USER}/password/set" -d "{\"password\" : \"$THEHIVE_PASS\"}") -if [[ -z "$resp" ]]; then - echo "Successfully updated TheHive user password" -else - echo "Unable to update TheHive user password" - echo $resp - exit 2 -fi +echo "TheHive and its components are no longer part of Security Onion" \ No newline at end of file diff --git a/salt/common/tools/sbin/so-user b/salt/common/tools/sbin/so-user index 10eca3196..b1a717ce8 100755 --- a/salt/common/tools/sbin/so-user +++ b/salt/common/tools/sbin/so-user @@ -476,7 +476,6 @@ case "${operation}" in createUser "$email" "${role:-$DEFAULT_ROLE}" syncAll echo "Successfully added new user to SOC" - check_container thehive && echo "$password" | so-thehive-user-add "$email" check_container fleet && echo "$password" | so-fleet-user-add "$email" ;; @@ -528,7 +527,6 @@ case "${operation}" in updateStatus "$email" 'active' syncAll echo "Successfully enabled user" - check_container thehive && so-thehive-user-enable "$email" true echo "Fleet user will need to be recreated manually with so-fleet-user-add" ;; @@ -540,7 +538,6 @@ case "${operation}" in updateStatus "$email" 'locked' syncAll echo "Successfully disabled user" - check_container thehive && so-thehive-user-enable "$email" false check_container fleet && so-fleet-user-delete "$email" ;; @@ -552,7 +549,6 @@ case "${operation}" in deleteUser "$email" syncAll echo "Successfully deleted user" - check_container thehive && so-thehive-user-enable "$email" false check_container fleet && so-fleet-user-delete "$email" ;; diff --git a/salt/common/tools/sbin/soup b/salt/common/tools/sbin/soup index 7181b1b9e..fe84bf383 100755 --- a/salt/common/tools/sbin/soup +++ b/salt/common/tools/sbin/soup @@ -34,7 +34,15 @@ check_err() { local err_msg="Unhandled error occured, please check $SOUP_LOG for 
details." [[ $ERR_HANDLED == true ]] && exit $exit_code + if [[ $exit_code -ne 0 ]]; then + + set +e + systemctl_func "start" "$cron_service_name" + systemctl_func "start" "salt-master" + systemctl_func "start" "salt-minion" + enable_highstate + printf '%s' "Soup failed with error $exit_code: " case $exit_code in 2) @@ -91,9 +99,7 @@ check_err() { if [[ $exit_code -ge 64 && $exit_code -le 113 ]]; then echo "$err_msg" fi - set +e - systemctl_func "start" "$cron_service_name" - enable_highstate + exit $exit_code fi @@ -416,6 +422,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.3.80 ]] && up_to_2.3.90 [[ "$INSTALLEDVERSION" == 2.3.90 || "$INSTALLEDVERSION" == 2.3.91 ]] && up_to_2.3.100 [[ "$INSTALLEDVERSION" == 2.3.100 ]] && up_to_2.3.110 + [[ "$INSTALLEDVERISON" == 2.3.110 ]] && up_to_2.3.120 true } @@ -429,6 +436,8 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.3.60 || "$POSTVERSION" == 2.3.61 || "$POSTVERSION" == 2.3.70 || "$POSTVERSION" == 2.3.80 ]] && post_to_2.3.90 [[ "$POSTVERSION" == 2.3.90 || "$POSTVERSION" == 2.3.91 ]] && post_to_2.3.100 [[ "$POSTVERSION" == 2.3.100 ]] && post_to_2.3.110 + [[ "$POSTVERSION" == 2.3.110 ]] && post_to_2.3.120 + true } @@ -492,6 +501,14 @@ post_to_2.3.110() { POSTVERSION=2.3.110 } +post_to_2.3.120() { + echo "Post Processing for 2.3.120" + POSTVERSION=2.3.120 + sed -i '/so-thehive-es/d;/so-thehive/d;/so-cortex/d' /opt/so/conf/so-status/so-status.conf +} + + + stop_salt_master() { # kill all salt jobs across the grid because the hang indefinitely if they are queued and salt-master restarts set +e @@ -728,9 +745,6 @@ up_to_2.3.90() { up_to_2.3.100() { fix_wazuh - echo "Removing /opt/so/state files for patched Salt InfluxDB module and state. This is due to Salt being upgraded and needing to patch the files again." 
- rm -vrf /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdb_retention_policy.py.patched /opt/so/state/influxdbmod.py.patched - echo "Adding receiver hostgroup with so-firewall" if so-firewall addhostgroup receiver 2>&1 | grep -q 'Already exists'; then echo 'receiver hostgroup already exists' @@ -743,11 +757,16 @@ up_to_2.3.100() { } up_to_2.3.110() { - echo "Updating to Security Onion 2.3.110" - echo "Updating shard settings for Elasticsearch index templates" sed -i 's|shards|index_template:\n template:\n settings:\n index:\n number_of_shards|g' /opt/so/saltstack/local/pillar/global.sls } +up_to_2.3.120() { + # Stop thehive services since these will be broken in .120 + so-thehive-stop + so-thehive-es-stop + so-cortex-stop + } + verify_upgradespace() { CURRENTSPACE=$(df -BG / | grep -v Avail | awk '{print $4}' | sed 's/.$//') if [ "$CURRENTSPACE" -lt "10" ]; then @@ -770,29 +789,6 @@ upgrade_space() { fi } -thehive_maint() { - echo -n "Waiting for TheHive..." - COUNT=0 - THEHIVE_CONNECTED="no" - while [[ "$COUNT" -le 240 ]]; do - curl --output /dev/null --silent --head --fail -k "https://localhost/thehive/api/alert" - if [ $? -eq 0 ]; then - THEHIVE_CONNECTED="yes" - echo "connected!" - break - else - ((COUNT+=1)) - sleep 1 - echo -n "." - fi - done - if [ "$THEHIVE_CONNECTED" == "yes" ]; then - echo "Migrating thehive databases if needed." - curl -v -k -XPOST -L "https://localhost/thehive/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1 - curl -v -k -XPOST -L "https://localhost/cortex/api/maintenance/migrate" >> "$SOUP_LOG" 2>&1 - fi -} - unmount_update() { cd /tmp umount /tmp/soagupdate @@ -908,6 +904,8 @@ upgrade_salt() { else echo "Salt upgrade success." echo "" + echo "Removing /opt/so/state files for patched Salt InfluxDB module and state. This is due to Salt being upgraded and needing to patch the files again." 
+ rm -vrf /opt/so/state/influxdb_continuous_query.py.patched /opt/so/state/influxdb_retention_policy.py.patched /opt/so/state/influxdbmod.py.patched fi } @@ -1037,6 +1035,17 @@ main() { echo "### Preparing soup at $(date) ###" echo "" + set_os + set_cron_service_name + if ! check_salt_master_status; then + echo "Could not talk to salt master" + echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master." + echo "SOUP will now attempt to start the salt-master service and exit." + exit 1 + fi + + echo "This node can communicate with the salt-master." + echo "Checking to see if this is a manager." echo "" require_manager @@ -1072,8 +1081,6 @@ main() { echo "Verifying we have the latest soup script." verify_latest_update_script echo "" - set_os - set_cron_service_name set_palette check_elastic_license echo "" @@ -1222,7 +1229,6 @@ main() { salt-call state.highstate -l info queue=True postupgrade_changes [[ $is_airgap -eq 0 ]] && unmount_update - thehive_maint echo "" echo "Upgrade to $NEWVERSION complete." 
diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index f2bb90792..03fc3facf 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -4067,7 +4067,7 @@ elasticsearch: field: "@timestamp" order: desc refresh_interval: 30s - number_of_shards: 1 + number_of_shards: 2 number_of_replicas: 0 composed_of: - agent-mappings diff --git a/salt/elasticsearch/files/ingest/rita.beacon b/salt/elasticsearch/files/ingest/rita.beacon new file mode 100644 index 000000000..ab53be763 --- /dev/null +++ b/salt/elasticsearch/files/ingest/rita.beacon @@ -0,0 +1,127 @@ +{ + "description": "RITA Beacons", + "processors": [ + { + "set": { + "field": "_index", + "value": "so-rita", + "override": true + } + }, + { + "csv": { + "field": "message", + "target_fields": [ + "beacon.score", + "source.ip", + "destination.ip", + "network.connections", + "network.average_bytes", + "beacon.interval.range", + "beacon.size.range", + "beacon.interval.top", + "beacon.size.top", + "beacon.interval.top_count", + "beacon.size.top_count", + "beacon.interval.skew", + "beacon.size.skew", + "beacon.interval.dispersion", + "beacon.size.dispersion", + "network.bytes" + ] + } + }, + { + "convert": { + "field": "beacon.score", + "type": "float" + } + }, + { + "convert": { + "field": "network.connections", + "type": "integer" + } + }, + { + "convert": { + "field": "network.average_bytes", + "type": "integer" + } + }, + { + "convert": { + "field": "beacon.interval.range", + "type": "integer" + } + }, + { + "convert": { + "field": "beacon.size.range", + "type": "integer" + } + }, + { + "convert": { + "field": "beacon.interval.top", + "type": "integer" + } + }, + { + "convert": { + "field": "beacon.size.top", + "type": "integer" + } + }, + { + "convert": { + "field": "beacon.interval.top_count", + "type": "integer" + } + }, + { + "convert": { + "field": "beacon.size.top_count", + "type": "integer" + } + }, + { + "convert": { + "field": "beacon.interval.skew", + 
"type": "float" + } + }, + { + "convert": { + "field": "beacon.size.skew", + "type": "float" + } + }, + { + "convert": { + "field": "beacon.interval.dispersion", + "type": "integer" + } + }, + { + "convert": { + "field": "beacon.size.dispersion", + "type": "integer" + } + }, + { + "convert": { + "field": "network.bytes", + "type": "integer" + } + }, + { "set": { "if": "ctx.beacon?.score == 1", "field": "dataset", "value": "alert", "override": true }}, + { "set": { "if": "ctx.beacon?.score == 1", "field": "rule.name", "value": "Potential C2 Beacon Activity", "override": true }}, + { "set": { "if": "ctx.beacon?.score == 1", "field": "event.severity", "value": 3, "override": true }}, + { + "pipeline": { + "name": "common" + } + } + ] +} diff --git a/salt/elasticsearch/files/ingest/rita.connection b/salt/elasticsearch/files/ingest/rita.connection new file mode 100644 index 000000000..58cc921f4 --- /dev/null +++ b/salt/elasticsearch/files/ingest/rita.connection @@ -0,0 +1,36 @@ +{ + "description": "RITA Connections", + "processors": [ + { + "set": { + "field": "_index", + "value": "so-rita", + "override": true + } + }, + { + "dissect": { + "field": "message", + "pattern": "%{source.ip},%{destination.ip},%{network.port}:%{network.protocol}:%{network.service},%{connection.duration},%{connection.state}" + } + }, + { + "convert": { + "field": "connection.duration", + "type": "float" + } + }, + { + "set": { + "field": "event.duration", + "value": "{{ connection.duration }}", + "override": true + } + }, + { + "pipeline": { + "name": "common" + } + } + ] +} diff --git a/salt/elasticsearch/files/ingest/rita.dns b/salt/elasticsearch/files/ingest/rita.dns new file mode 100644 index 000000000..7583bc320 --- /dev/null +++ b/salt/elasticsearch/files/ingest/rita.dns @@ -0,0 +1,39 @@ +{ + "description": "RITA DNS", + "processors": [ + { + "set": { + "field": "_index", + "value": "so-rita", + "override": true + } + }, + { + "csv": { + "field": "message", + "target_fields": [ + 
"dns.question.name", + "dns.question.subdomain_count", + "dns.question.count" + ] + } + }, + { + "convert": { + "field": "dns.question.subdomain_count", + "type": "integer" + } + }, + { + "convert": { + "field": "dns.question.count", + "type": "integer" + } + }, + { + "pipeline": { + "name": "common" + } + } + ] +} diff --git a/salt/elasticsearch/files/ingest/syslog b/salt/elasticsearch/files/ingest/syslog index bf40efec5..8919c3e1f 100644 --- a/salt/elasticsearch/files/ingest/syslog +++ b/salt/elasticsearch/files/ingest/syslog @@ -1,36 +1,157 @@ { - "description" : "syslog", + "description" : "syslog pipeline", "processors" : [ { - "dissect": { - "field": "message", - "pattern" : "%{message}", - "on_failure": [ { "drop" : { } } ] - }, - "remove": { - "field": [ "type", "agent" ], - "ignore_failure": true - } + "dissect": { + "field": "message", + "pattern" : "%{message}", + "on_failure": [ { "drop" : { } } ] + }, + "remove": { + "field": [ "type", "agent" ], + "ignore_failure": true + } + }, { + "grok": { + "field": "message", + "patterns": [ + "^<%{INT:syslog.priority:int}>%{TIMESTAMP_ISO8601:syslog.timestamp} +%{IPORHOST:syslog.host} +%{PROG:syslog.program}(?:\\[%{POSINT:syslog.pid:int}\\])?: %{GREEDYDATA:real_message}$", + + "^<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}(\\[%{DATA:pid}\\])?: %{GREEDYDATA:real_message}$", + + "^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$" + ], + "ignore_failure": true + } }, { - "grok": - { - "field": "message", - "patterns": [ - "^<%{INT:syslog.priority}>%{DATA:syslog.timestamp} %{WORD:source.application}(\\[%{DATA:pid}\\])?: %{GREEDYDATA:real_message}$", - "^%{SYSLOGTIMESTAMP:syslog.timestamp} %{SYSLOGHOST:syslog.host} %{SYSLOGPROG:syslog.program}: CEF:0\\|%{DATA:vendor}\\|%{DATA:product}\\|%{GREEDYDATA:message2}$" - ], - "ignore_failure": true - } + "convert" : { + "if": 
"ctx?.syslog?.priority != null", + "field" : "syslog.priority", + "type": "integer" + } }, - { "set": { "if": "ctx.source?.application == 'filterlog'", "field": "dataset", "value": "firewall", "ignore_failure": true } }, - { "set": { "if": "ctx.vendor != null", "field": "module", "value": "{{ vendor }}", "ignore_failure": true } }, - { "set": { "if": "ctx.product != null", "field": "dataset", "value": "{{ product }}", "ignore_failure": true } }, - { "set": { "field": "event.ingested", "value": "{{ @timestamp }}" } }, - { "date": { "if": "ctx.syslog?.timestamp != null", "field": "syslog.timestamp", "target_field": "@timestamp", "formats": ["MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601", "UNIX"], "ignore_failure": true } }, - { "remove": { "field": ["pid", "program"], "ignore_missing": true, "ignore_failure": true } }, - { "pipeline": { "if": "ctx.vendor != null && ctx.product != null", "name": "{{ vendor }}.{{ product }}", "ignore_failure": true } }, - { "pipeline": { "if": "ctx.dataset == 'firewall'", "name": "filterlog", "ignore_failure": true } }, - { "pipeline": { "name": "common" } } + { + "script": { + "description": "Map syslog priority into facility and level", + "lang": "painless", + "params" : { + "level": [ + "emerg", + "alert", + "crit", + "err", + "warn", + "notice", + "info", + "debug" + ], + "facility" : [ + "kern", + "user", + "mail", + "daemon", + "auth", + "syslog", + "lpr", + "news", + "uucp", + "cron", + "authpriv", + "ftp", + "ntp", + "security", + "console", + "solaris-cron", + "local0", + "local1", + "local2", + "local3", + "local4", + "local5", + "local6", + "local7" + ] + }, + "source": "if (ctx['syslog'] != null && ctx['syslog']['priority'] != null) { int p = ctx['syslog']['priority']; int f = p / 8; int l = p - (f * 8); ctx['syslog']['facility_label'] = [ : ]; ctx['syslog']['severity_label'] = [ : ]; ctx['syslog'].put('severity', l); ctx['syslog'].put('severity_label', params.level[l].toUpperCase()); ctx['syslog'].put('facility', f); 
ctx['syslog'].put('facility_label', params.facility[f].toUpperCase()); }" + + } + }, + { + "set": { + "if": "ctx.syslog?.host != null", + "field": "host.name", + "value": "{{ syslog.host }}", + "ignore_failure": true + } + }, { + "set": { + "if": "ctx.syslog?.program != null", + "field": "process.name", + "value": "{{ syslog.program }}", + "ignore_failure": true + } + }, { + "set": { + "if": "ctx.syslog?.pid != null", + "field": "process.id", + "value": "{{ syslog.pid }}", + "ignore_failure": true + } + }, { + "set": { + "if": "ctx.source?.application == 'filterlog'", + "field": "dataset", + "value": "firewall", + "ignore_failure": true + } + }, { + "set": { + "if": "ctx.vendor != null", + "field": "module", + "value": "{{ vendor }}", + "ignore_failure": true + } + }, { + "set": { + "if": "ctx.product != null", + "field": "dataset", + "value": "{{ product }}", + "ignore_failure": true + } + }, { + "set": { + "field": "ingest.timestamp", + "value": "{{ @timestamp }}" + } + }, { + "date": { + "if": "ctx.syslog?.timestamp != null", + "field": "syslog.timestamp", + "target_field": "@timestamp", + "formats": ["MMM d HH:mm:ss", "MMM dd HH:mm:ss", "ISO8601", "UNIX"], + "ignore_failure": true + } + }, { + "remove": { + "field": ["pid", "program"], + "ignore_missing": true, + "ignore_failure": true + } + }, { + "pipeline": { + "if": "ctx.vendor != null && ctx.product != null", + "name": "{{ vendor }}.{{ product }}", + "ignore_failure": true + } + }, { + "pipeline": { + "if": "ctx.dataset == 'firewall'", + "name": "filterlog", + "ignore_failure": true + } + }, { + "pipeline": { "name": "common" } + } ] } diff --git a/salt/elasticsearch/files/log4j2.properties b/salt/elasticsearch/files/log4j2.properties index 85cf5d8fb..014fa61a1 100644 --- a/salt/elasticsearch/files/log4j2.properties +++ b/salt/elasticsearch/files/log4j2.properties @@ -11,10 +11,17 @@ appender.rolling.name = rolling appender.rolling.fileName = 
${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log appender.rolling.layout.type = PatternLayout appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n -appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log +appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log.gz appender.rolling.policies.type = Policies appender.rolling.policies.time.type = TimeBasedTriggeringPolicy appender.rolling.policies.time.interval = 1 appender.rolling.policies.time.modulate = true +appender.rolling.strategy.type = DefaultRolloverStrategy +appender.rolling.strategy.action.type = Delete +appender.rolling.strategy.action.basepath = /var/log/elasticsearch +appender.rolling.strategy.action.condition.type = IfFileName +appender.rolling.strategy.action.condition.glob = *.gz +appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified +appender.rolling.strategy.action.condition.nested_condition.age = 7D rootLogger.level = info rootLogger.appenderRef.rolling.ref = rolling diff --git a/salt/elasticsearch/init.sls b/salt/elasticsearch/init.sls index 19f1aeacb..1d1518b3c 100644 --- a/salt/elasticsearch/init.sls +++ b/salt/elasticsearch/init.sls @@ -207,6 +207,7 @@ escomponenttemplates: # Auto-generate templates from defaults file {% for index, settings in ES_INDEX_SETTINGS.items() %} + {% if settings.index_template is defined %} es_index_template_{{index}}: file.managed: - name: /opt/so/conf/elasticsearch/templates/index/{{ index }}-template.json @@ -216,6 +217,7 @@ es_index_template_{{index}}: - template: jinja - onchanges_in: - cmd: so-elasticsearch-templates + {% endif %} {% endfor %} {% if TEMPLATES %} diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index 8168fec50..9c90cc28f 100644 --- a/salt/elasticsearch/template.map.jinja +++ 
b/salt/elasticsearch/template.map.jinja @@ -1,7 +1,9 @@ {% import_yaml 'elasticsearch/defaults.yaml' as ESCONFIG with context %} {%- set ES_INDEX_SETTINGS = salt['pillar.get']('elasticsearch:index_settings', default=ESCONFIG.elasticsearch.index_settings, merge=True) %} {% for index, settings in ES_INDEX_SETTINGS.items() %} - {% if settings.index_sorting, False %} - {% do settings.index_template.template.settings.index.pop('sort') %} + {% if settings.index_template is defined %} + {% if not settings.get('index_sorting', False) | to_bool and settings.index_template.template.settings.index.sort is defined %} + {% do settings.index_template.template.settings.index.pop('sort') %} + {% endif %} {% endif %} {% endfor %} diff --git a/salt/elasticsearch/templates/component/so/dtc-process-mappings.json b/salt/elasticsearch/templates/component/so/dtc-process-mappings.json index 2b8d8abfb..d3d22139a 100644 --- a/salt/elasticsearch/templates/component/so/dtc-process-mappings.json +++ b/salt/elasticsearch/templates/component/so/dtc-process-mappings.json @@ -60,6 +60,32 @@ }, "type": "wildcard" }, + "entity_id": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + } + }, + "executable": { + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + }, + "ignore_above": 1024, + "type": "keyword" + }, "name": { "fields": { "keyword": { @@ -73,6 +99,133 @@ "ignore_above": 1024, "type": "keyword" }, + "parent": { + "properties": { + "command_line": { + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "text": { + "type": "match_only_text" + }, + "keyword": { + "type": "keyword" + } + }, + "type": "wildcard" + }, + "entity_id": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "security": { + "type": "text", + "analyzer": 
"es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + } + }, + "executable": { + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + }, + "ignore_above": 1024, + "type": "keyword" + } + } + }, + "pe": { + "properties": { + "architecture": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + } + }, + "company": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + } + }, + "description": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + } + }, + "file_version": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + } + }, + "original_file_name": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + } + }, + "product": { + "ignore_above": 1024, + "type": "keyword", + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + } + } + } + }, "pid": { "type": "long", "fields": { @@ -88,6 +241,19 @@ "type": "keyword" } } + }, + "working_directory": { + "fields": { + "security": { + "type": "text", + "analyzer": "es_security_analyzer" + }, + "keyword": { + "type": "keyword" + } + }, + "ignore_above": 1024, + "type": "keyword" } } } diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-roles-load b/salt/elasticsearch/tools/sbin/so-elasticsearch-roles-load index 2da8f85e4..7ce907f87 100755 --- 
a/salt/elasticsearch/tools/sbin/so-elasticsearch-roles-load +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-roles-load @@ -33,6 +33,8 @@ while [[ "$COUNT" -le 240 ]]; do if [ $? -eq 0 ]; then ELASTICSEARCH_CONNECTED="yes" echo "connected!" + # Check cluster health once connected + so-elasticsearch-query _cluster/health?wait_for_status=yellow > /dev/null 2>&1 break else ((COUNT+=1)) @@ -48,7 +50,7 @@ fi cd ${ELASTICSEARCH_ROLES} -echo "Loading templates..." +echo "Loading roles..." for role in *; do name=$(echo "$role" | cut -d. -f1) so-elasticsearch-query _security/role/$name -XPUT -d @"$role" diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin/so-elasticsearch-templates-load index f3bcaa308..93c1c6298 100755 --- a/salt/elasticsearch/tools/sbin/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-templates-load @@ -30,7 +30,7 @@ echo -n "Waiting for ElasticSearch..." COUNT=0 ELASTICSEARCH_CONNECTED="no" while [[ "$COUNT" -le 240 ]]; do - {{ ELASTICCURL }} -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" + so-elasticsearch-query -k --output /dev/null --silent --head --fail if [ $? -eq 0 ]; then ELASTICSEARCH_CONNECTED="yes" echo "connected!" @@ -50,21 +50,20 @@ fi cd ${ELASTICSEARCH_TEMPLATES}/component/ecs echo "Loading ECS component templates..." -for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE-mappings"; {{ ELASTICCURL }} -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_component_template/$TEMPLATE-mappings -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done -echo +for i in *; do TEMPLATE=$(echo $i | cut -d '.' 
-f1); echo "$TEMPLATE-mappings"; so-elasticsearch-query _component_template/$TEMPLATE-mappings -d@$i -XPUT 2>/dev/null; echo; done # Load SO-specific component templates cd ${ELASTICSEARCH_TEMPLATES}/component/so echo "Loading Security Onion component templates..." -for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE"; {{ ELASTICCURL }} -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_component_template/$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done +for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done echo # Load SO index templates cd ${ELASTICSEARCH_TEMPLATES}/index echo "Loading Security Onion index templates..." -for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; {{ ELASTICCURL }} -k ${ELASTICSEARCH_AUTH} -s -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_index_template/so-$TEMPLATE -H 'Content-Type: application/json' -d@$i 2>/dev/null; echo; done +for i in *; do TEMPLATE=$(echo $i | cut -d '-' -f2); echo "so-$TEMPLATE"; so-elasticsearch-query _index_template/so-$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done echo cd - >/dev/null diff --git a/salt/filebeat/etc/filebeat.yml b/salt/filebeat/etc/filebeat.yml index e29b1a583..62a45e9c4 100644 --- a/salt/filebeat/etc/filebeat.yml +++ b/salt/filebeat/etc/filebeat.yml @@ -10,6 +10,7 @@ {%- set ZEEKVER = salt['pillar.get']('global:mdengine', 'COMMUNITY') %} {%- set WAZUHENABLED = salt['pillar.get']('global:wazuh', '0') %} {%- set STRELKAENABLED = salt['pillar.get']('strelka:enabled', '0') %} +{%- set RITAENABLED = salt['pillar.get']('rita:enabled', False) -%} {%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) -%} {%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) -%} {%- set FBMEMEVENTS = salt['pillar.get']('filebeat:mem_events', 2048) -%} @@ -264,6 +265,54 
@@ filebeat.inputs: {%- endif %} +{%- if RITAENABLED %} +- type: filestream + paths: + - /nsm/rita/beacons.csv + exclude_lines: ['^Score', '^Source', '^Domain', '^No results'] + fields: + module: rita + dataset: beacon + category: network + processors: + - drop_fields: + fields: ["source", "prospector", "input", "offset", "beat"] + fields_under_root: true + pipeline: "rita.beacon" + index: "so-rita" + +- type: filestream + paths: + - /nsm/rita/long-connections.csv + - /nsm/rita/open-connections.csv + exclude_lines: ['^Source', '^No results'] + fields: + module: rita + dataset: connection + category: network + processors: + - drop_fields: + fields: ["source", "prospector", "input", "offset", "beat"] + fields_under_root: true + pipeline: "rita.connection" + index: "so-rita" + +- type: filestream + paths: + - /nsm/rita/exploded-dns.csv + exclude_lines: ['^Domain', '^No results'] + fields: + module: rita + dataset: dns + category: network + processors: + - drop_fields: + fields: ["source", "prospector", "input", "offset", "beat"] + fields_under_root: true + pipeline: "rita.dns" + index: "so-rita" +{%- endif %} + {%- if grains['role'] in ['so-eval', 'so-standalone', 'so-manager', 'so-managersearch', 'so-import'] %} - type: log paths: diff --git a/salt/firewall/assigned_hostgroups.map.yaml b/salt/firewall/assigned_hostgroups.map.yaml index 9e105e567..257c45808 100644 --- a/salt/firewall/assigned_hostgroups.map.yaml +++ b/salt/firewall/assigned_hostgroups.map.yaml @@ -349,6 +349,9 @@ role: osquery_endpoint: portgroups: - {{ portgroups.fleet_api }} + strelka_frontend: + portgroups: + - {{ portgroups.strelka_frontend }} syslog: portgroups: - {{ portgroups.syslog }} @@ -482,6 +485,9 @@ role: self: portgroups: - {{ portgroups.syslog}} + strelka_frontend: + portgroups: + - {{ portgroups.strelka_frontend }} INPUT: hostgroups: anywhere: @@ -511,6 +517,9 @@ role: self: portgroups: - {{ portgroups.syslog}} + strelka_frontend: + portgroups: + - {{ portgroups.strelka_frontend }} 
INPUT: hostgroups: anywhere: diff --git a/salt/idh/init.sls b/salt/idh/init.sls index 089ecc4df..70a5d370d 100644 --- a/salt/idh/init.sls +++ b/salt/idh/init.sls @@ -19,11 +19,37 @@ {% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} {% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} {% set MANAGER = salt['grains.get']('master') %} +{% set MAININT = salt['pillar.get']('host:mainint') %} +{% set MAINIP = salt['grains.get']('ip_interfaces').get(MAININT)[0] %} +{% set RESTRICTIDHSERVICES = salt['pillar.get']('idh:restrict_management_ip', False) %} include: - idh.openssh.config + - firewall -# IDH State + +# If True, block IDH Services from accepting connections on Management IP +{% if RESTRICTIDHSERVICES %} + {% from 'idh/opencanary_config.map.jinja' import OPENCANARYCONFIG %} + {% set idh_services = salt['pillar.get']('idh:services', []) %} + + {% for service in idh_services %} + {% if service in ["snmp","ntp", "tftp"] %} + {% set proto = 'udp' %} + {% else %} + {% set proto = 'tcp' %} + {% endif %} +block_mgt_ip_idh_services_{{ proto }}_{{ OPENCANARYCONFIG[service~'.port'] }} : + iptables.insert: + - table: filter + - chain: INPUT + - jump: DROP + - position: 1 + - proto: {{ proto }} + - dport: {{ OPENCANARYCONFIG[service~'.port'] }} + - destination: {{ MAINIP }} + {% endfor %} +{% endif %} # Create a config directory temp: diff --git a/salt/influxdb/defaults.yaml b/salt/influxdb/defaults.yaml index 205c2ba67..c89257c53 100644 --- a/salt/influxdb/defaults.yaml +++ b/salt/influxdb/defaults.yaml @@ -1,4 +1,4 @@ -{% set measurements = salt['cmd.shell']('docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -database telegraf -execute "show measurements" 2> /root/measurement_query.log | jq -r .results[0].series[0].values[]?[0] 2>> /root/measurement_query.log') %} +{% set measurements = salt['cmd.shell']('docker exec -t so-influxdb influx -format json -ssl -unsafeSsl -database telegraf -execute "show measurements" 2> 
/root/measurement_query.log | jq -r .results[0].series[0].values[]?[0] 2>> /root/measurement_query.log', shell='/bin/bash') %} influxdb: retention_policies: diff --git a/salt/kibana/bin/so-kibana-config-load b/salt/kibana/bin/so-kibana-config-load index 4752925b4..b21858369 100644 --- a/salt/kibana/bin/so-kibana-config-load +++ b/salt/kibana/bin/so-kibana-config-load @@ -59,7 +59,7 @@ update() { IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))' for i in "${LINES[@]}"; do - RESPONSE=$({{ ELASTICCURL }} -X PUT "localhost:5601/api/saved_objects/config/7.17.1" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ") + RESPONSE=$({{ ELASTICCURL }} -X PUT "localhost:5601/api/saved_objects/config/7.17.3" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ") echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi done diff --git a/salt/kibana/files/config_saved_objects.ndjson b/salt/kibana/files/config_saved_objects.ndjson index e2bd5fe2f..0267ec4fb 100644 --- a/salt/kibana/files/config_saved_objects.ndjson +++ b/salt/kibana/files/config_saved_objects.ndjson @@ -1 +1 @@ -{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "7.17.1","id": "7.17.1","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} +{"attributes": {"buildNum": 39457,"defaultIndex": "2289a0c0-6970-11ea-a0cd-ffa0f6a1bc29","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "7.17.3","id": 
"7.17.3","migrationVersion": {"config": "7.13.0"},"references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} diff --git a/salt/logstash/etc/log4j2.properties b/salt/logstash/etc/log4j2.properties index 73a646b8d..739756061 100644 --- a/salt/logstash/etc/log4j2.properties +++ b/salt/logstash/etc/log4j2.properties @@ -18,7 +18,7 @@ appender.rolling.name = rolling appender.rolling.fileName = /var/log/logstash/logstash.log appender.rolling.layout.type = PatternLayout appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n -appender.rolling.filePattern = /var/log/logstash/logstash-%d{yyyy-MM-dd}.log +appender.rolling.filePattern = /var/log/logstash/logstash-%d{yyyy-MM-dd}.log.gz appender.rolling.policies.type = Policies appender.rolling.policies.time.type = TimeBasedTriggeringPolicy appender.rolling.policies.time.interval = 1 @@ -27,7 +27,7 @@ appender.rolling.strategy.type = DefaultRolloverStrategy appender.rolling.strategy.action.type = Delete appender.rolling.strategy.action.basepath = /var/log/logstash appender.rolling.strategy.action.condition.type = IfFileName -appender.rolling.strategy.action.condition.glob = logstash-*.log +appender.rolling.strategy.action.condition.glob = *.gz appender.rolling.strategy.action.condition.nested_condition.type = IfLastModified appender.rolling.strategy.action.condition.nested_condition.age = 7D rootLogger.level = info diff --git a/salt/logstash/pipelines/config/so/9801_output_rita.conf.jinja b/salt/logstash/pipelines/config/so/9801_output_rita.conf.jinja new file mode 100644 index 000000000..40c6ad33c --- /dev/null +++ b/salt/logstash/pipelines/config/so/9801_output_rita.conf.jinja @@ -0,0 +1,22 @@ +{%- if grains['role'] == 'so-eval' -%} +{%- set ES = salt['pillar.get']('manager:mainip', '') -%} +{%- else %} +{%- set ES = salt['pillar.get']('elasticsearch:mainip', '') -%} +{%- endif %} +{%- set ES_USER = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:user', 
'') %} +{%- set ES_PASS = salt['pillar.get']('elasticsearch:auth:users:so_elastic_user:pass', '') %} +output { + if [module] =~ "rita" and "import" not in [tags] { + elasticsearch { + pipeline => "%{module}.%{dataset}" + hosts => "{{ ES }}" +{% if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %} + user => "{{ ES_USER }}" + password => "{{ ES_PASS }}" +{% endif %} + index => "so-rita" + ssl => true + ssl_certificate_verification => false + } + } +} diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 7f3731c75..2a5fe95f7 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -334,30 +334,6 @@ http { } {%- endif %} - - location /thehive/ { - proxy_pass http://{{ manager_ip }}:9000/thehive/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } - - location /cortex/ { - proxy_pass http://{{ manager_ip }}:9001/cortex/; - proxy_read_timeout 90; - proxy_connect_timeout 90; - proxy_http_version 1.1; # this is essential for chunked responses to work - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - } location /soctopus/ { auth_request /auth/sessions/whoami; diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index eb9f5ae89..389a95607 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -11,6 +11,7 @@ {% set PYTHON3INFLUX= 'influxdb == ' ~ PYTHONINFLUXVERSION %} {% set PYTHON3INFLUXDEPS= ['certifi', 'chardet', 'python-dateutil', 'pytz', 'requests'] %} {% set PYTHONINSTALLER = 'pip' %} + {% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %} 
{% else %} {% set SPLITCHAR = '-' %} {% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %} @@ -21,6 +22,7 @@ {% set PYTHON3INFLUX= 'securityonion-python3-influxdb' %} {% set PYTHON3INFLUXDEPS= ['python36-certifi', 'python36-chardet', 'python36-dateutil', 'python36-pytz', 'python36-requests'] %} {% set PYTHONINSTALLER = 'pkg' %} + {% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %} {% endif %} {% set INSTALLEDSALTVERSION = salt['pkg.version']('salt-minion').split(SPLITCHAR)[0] %} diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index a35746db7..15e203d82 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -3,6 +3,7 @@ {% from 'salt/map.jinja' import INSTALLEDSALTVERSION %} {% from 'salt/map.jinja' import SALTNOTHELD %} {% from 'salt/map.jinja' import SALTPACKAGES %} +{% from 'salt/map.jinja' import SYSTEMD_UNIT_FILE %} {% import_yaml 'salt/minion.defaults.yaml' as SALTMINION %} {% set service_start_delay = SALTMINION.salt.minion.service_start_delay %} @@ -82,7 +83,7 @@ set_log_levels: salt_minion_service_unit_file: file.managed: - - name: /etc/systemd/system/multi-user.target.wants/salt-minion.service + - name: {{ SYSTEMD_UNIT_FILE }} - source: salt://salt/service/salt-minion.service.jinja - template: jinja - defaults: @@ -110,6 +111,7 @@ salt_minion_service: - file: set_log_levels - file: salt_minion_service_unit_file {% endif %} + - order: last patch_pkg: diff --git a/salt/soc/files/soc/cases.eventfields.json b/salt/soc/files/soc/cases.eventfields.json index d719fb45a..f04c50b94 100644 --- a/salt/soc/files/soc/cases.eventfields.json +++ b/salt/soc/files/soc/cases.eventfields.json @@ -1,3 +1,3 @@ { - "default": ["soc_timestamp", "so_case.title", "so_case.status", "so_case.severity", "so_case.createTime"] + "default": ["soc_timestamp", "so_case.title", "so_case.status", "so_case.severity", "so_case.assigneeId", "so_case.createTime"] } \ No newline at end of file diff 
--git a/salt/soc/files/soc/hunt.eventfields.json b/salt/soc/files/soc/hunt.eventfields.json index 7964a360b..418cd4d87 100644 --- a/salt/soc/files/soc/hunt.eventfields.json +++ b/salt/soc/files/soc/hunt.eventfields.json @@ -1,5 +1,6 @@ { "default": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "log.id.uid", "network.community_id", "event.dataset" ], + ":kratos:audit": ["soc_timestamp", "http_request.headers.x-real-ip", "identity_id", "http_request.headers.user-agent" ], "::conn": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "network.transport", "network.protocol", "log.id.uid", "network.community_id" ], "::dce_rpc": ["soc_timestamp", "source.ip", "source.port", "destination.ip", "destination.port", "dce_rpc.endpoint", "dce_rpc.named_pipe", "dce_rpc.operation", "log.id.uid" ], "::dhcp": ["soc_timestamp", "client.address", "server.address", "host.domain", "host.hostname", "dhcp.message_types", "log.id.uid" ], diff --git a/salt/soc/files/soc/hunt.queries.json b/salt/soc/files/soc/hunt.queries.json index 3125b2f74..5a76e0fa1 100644 --- a/salt/soc/files/soc/hunt.queries.json +++ b/salt/soc/files/soc/hunt.queries.json @@ -1,6 +1,7 @@ [ { "name": "Default Query", "description": "Show all events grouped by the origin host", "query": "* | groupby observer.name"}, { "name": "Log Type", "description": "Show all events grouped by module and dataset", "query": "* | groupby event.module event.dataset"}, + { "name": "SOC Auth", "description": "Users authenticated to SOC grouped by IP address and identity", "query": "event.module:kratos AND event.dataset:audit AND msg:authenticated | groupby http_request.headers.x-real-ip identity_id"}, { "name": "Elastalerts", "description": "", "query": "_type:elastalert | groupby rule.name"}, { "name": "Alerts", "description": "Show all alerts grouped by alert source", "query": "event.dataset: alert | groupby event.module"}, { "name": "NIDS Alerts", "description": 
"Show all NIDS alerts grouped by alert", "query": "event.category: network AND event.dataset: alert | groupby rule.category rule.gid rule.uuid rule.name"}, diff --git a/salt/soc/files/soc/soc.json b/salt/soc/files/soc/soc.json index 4160a5a01..9b034ad57 100644 --- a/salt/soc/files/soc/soc.json +++ b/salt/soc/files/soc/soc.json @@ -1,8 +1,8 @@ {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} {%- set SENSORONIKEY = salt['pillar.get']('global:sensoronikey', '') %} {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %} +{%- set THEHIVEURL = salt['pillar.get']('global:hiveurl', '') %} {%- set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %} -{%- set THEHIVE = salt['pillar.get']('manager:thehive', '0') %} {%- set FLEETMANAGER = salt['pillar.get']('global:fleet_manager', False) %} {%- set FLEETNODE = salt['pillar.get']('global:fleet_node', False) %} {%- set GRAFANA = salt['pillar.get']('manager:grafana', '0') %} @@ -91,7 +91,7 @@ }, {%- if CASE_MODULE == 'thehive' and THEHIVEKEY != '' %} "thehive": { - "hostUrl": "http://{{ MANAGERIP }}:9000/thehive", + "hostUrl": "http://{{ THEHIVEURL }}:9000/thehive", "key": "{{ THEHIVEKEY }}", "verifyCert": false }, @@ -140,9 +140,6 @@ {%- if PLAYBOOK == 0 %} "toolPlaybook", {%- endif %} - {%- if THEHIVE == 0 %} - "toolTheHive", - {%- endif %} {%- if not FLEETMANAGER and not FLEETNODE %} "toolFleet", {%- endif %} diff --git a/salt/soc/files/soc/tools.json b/salt/soc/files/soc/tools.json index 96d9a3f50..b53f112e5 100644 --- a/salt/soc/files/soc/tools.json +++ b/salt/soc/files/soc/tools.json @@ -4,6 +4,5 @@ { "name": "toolCyberchef", "description": "toolCyberchefHelp", "icon": "fa-external-link-alt", "target": "so-cyberchef", "link": "/cyberchef/" }, { "name": "toolPlaybook", "description": "toolPlaybookHelp", "icon": "fa-external-link-alt", "target": "so-playbook", "link": "/playbook/projects/detection-playbooks/issues/" }, { "name": "toolFleet", "description": "toolFleetHelp", "icon": 
"fa-external-link-alt", "target": "so-fleet", "link": "/fleet/" }, - { "name": "toolTheHive", "description": "toolTheHiveHelp", "icon": "fa-external-link-alt", "target": "so-thehive", "link": "/thehive/" }, { "name": "toolNavigator", "description": "toolNavigatorHelp", "icon": "fa-external-link-alt", "target": "so-navigator", "link": "/navigator/" } ] \ No newline at end of file diff --git a/salt/soctopus/files/SOCtopus.conf b/salt/soctopus/files/SOCtopus.conf index f45d2f5e3..b6b6825eb 100644 --- a/salt/soctopus/files/SOCtopus.conf +++ b/salt/soctopus/files/SOCtopus.conf @@ -1,6 +1,7 @@ {%- set MANAGER = salt['pillar.get']('manager:mainip', '') %} {%- set URLBASE = salt['pillar.get']('global:url_base', '') %} {%- set HIVEKEY = salt['pillar.get']('global:hivekey', '') %} +{%- set THEHIVEURL = salt['pillar.get']('global:hiveurl', '') %} {%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} {%- set PLAYBOOK_KEY = salt['pillar.get']('playbook:api_key', '') %} {%- if salt['pillar.get']('elasticsearch:auth:enabled') is sameas true %} @@ -21,7 +22,7 @@ es_verifycert = no [cortex] auto_analyze_alerts = no -cortex_url = https://{{URLBASE}}/cortex/ +cortex_url = https://{{THEHIVEURL}}/cortex/ cortex_key = {{ CORTEXKEY }} supported_analyzers = Urlscan_io_Search,CERTatPassiveDNS @@ -42,7 +43,7 @@ grr_user = YOURGRRUSER grr_pass = YOURGRRPASS [hive] -hive_url = https://{{URLBASE}}/thehive/ +hive_url = https://{{THEHIVEURL}}/thehive/ hive_key = {{ HIVEKEY }} hive_tlp = 3 hive_verifycert = no diff --git a/salt/suricata/init.sls b/salt/suricata/init.sls index 95d9787f3..4c2347302 100644 --- a/salt/suricata/init.sls +++ b/salt/suricata/init.sls @@ -36,12 +36,12 @@ suricatagroup: - name: suricata - gid: 940 -# Add ES user +# Add Suricata user suricata: user.present: - uid: 940 - gid: 940 - - home: /opt/so/conf/suricata + - home: /nsm/suricata - createhome: False suridir: diff --git a/salt/telegraf/init.sls b/salt/telegraf/init.sls index e7b550259..3c46b4956 
100644 --- a/salt/telegraf/init.sls +++ b/salt/telegraf/init.sls @@ -13,7 +13,12 @@ tgraflogdir: file.directory: - name: /opt/so/log/telegraf - makedirs: True - + - user: 939 + - group: 939 + - recurse: + - user + - group + tgrafetcdir: file.directory: - name: /opt/so/conf/telegraf/etc @@ -29,7 +34,7 @@ tgrafsyncscripts: - name: /opt/so/conf/telegraf/scripts - user: root - group: 939 - - file_mode: 700 + - file_mode: 770 - template: jinja - source: salt://telegraf/scripts {% if salt['pillar.get']('global:mdengine', 'ZEEK') == 'SURICATA' %} @@ -57,6 +62,8 @@ node_config: so-telegraf: docker_container.running: - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-telegraf:{{ VERSION }} + - user: 939 + - group_add: 939,920 - environment: - HOST_PROC=/host/proc - HOST_ETC=/host/etc diff --git a/salt/telegraf/scripts/influxdbsize.sh b/salt/telegraf/scripts/influxdbsize.sh index 87571629d..bf4431a10 100644 --- a/salt/telegraf/scripts/influxdbsize.sh +++ b/salt/telegraf/scripts/influxdbsize.sh @@ -18,9 +18,12 @@ # if this script isn't already running if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then - INFLUXSIZE=$(du -s -k /host/nsm/influxdb | awk {'print $1'}) - echo "influxsize kbytes=$INFLUXSIZE" - + INFLUXLOG=/var/log/telegraf/influxdb_size.log + + if [ -f "$INFLUXLOG" ]; then + INFLUXSTATUS=$(cat $INFLUXLOG) + echo "influxsize kbytes=$INFLUXSTATUS" + fi fi exit 0 diff --git a/salt/thehive/etc/application.conf b/salt/thehive/etc/application.conf deleted file mode 100644 index 675c5222c..000000000 --- a/salt/thehive/etc/application.conf +++ /dev/null @@ -1,219 +0,0 @@ -{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} -{%- set CORTEXKEY = salt['pillar.get']('global:cortexorguserkey', '') %} -{%- set HIVEPLAYSECRET = salt['pillar.get']('global:hiveplaysecret', '') %} - -# Secret Key -# The secret key is used to secure cryptographic functions. -# WARNING: If you deploy your application on several servers, make sure to use the same key. 
-play.http.secret.key="{{ HIVEPLAYSECRET }}" -play.http.context=/thehive/ -search.uri = "http://{{ MANAGERIP }}:9400" -# Elasticsearch -search { - # Name of the index - index = the_hive - # Name of the Elasticsearch cluster - cluster = thehive - # Address of the Elasticsearch instance - host = ["{{ MANAGERIP }}:9500"] - #search.uri = "http://{{ MANAGERIP }}:9500" - # Scroll keepalive - keepalive = 1m - # Size of the page for scroll - pagesize = 50 - # Number of shards - nbshards = 5 - # Number of replicas - nbreplicas = 0 - # Arbitrary settings - settings { - # Maximum number of nested fields - mapping.nested_fields.limit = 100 - } - - ### XPack SSL configuration - # Username for XPack authentication - #username - # Password for XPack authentication - #password - # Enable SSL to connect to ElasticSearch - ssl.enabled = false - # Path to certificate authority file - #ssl.ca - # Path to certificate file - #ssl.certificate - # Path to key file - #ssl.key - - ### SearchGuard configuration - # Path to JKS file containing client certificate - #guard.keyStore.path - # Password of the keystore - #guard.keyStore.password - # Path to JKS file containing certificate authorities - #guard.trustStore.path - ## Password of the truststore - #guard.trustStore.password - # Enforce hostname verification - #guard.hostVerification - # If hostname verification is enabled specify if hostname should be resolved - #guard.hostVerificationResolveHostname -} - -# Authentication -auth { - # "provider" parameter contains authentication provider. It can be multi-valued (useful for migration) - # available auth types are: - # services.LocalAuthSrv : passwords are stored in user entity (in Elasticsearch). No configuration is required. - # ad : use ActiveDirectory to authenticate users. Configuration is under "auth.ad" key - # ldap : use LDAP to authenticate users. Configuration is under "auth.ldap" key - provider = [local] - - # By default, basic authentication is disabled. 
You can enable it by setting "method.basic" to true. - #method.basic = true - - - ad { - # The Windows domain name in DNS format. This parameter is required if you do not use - # 'serverNames' below. - #domainFQDN = "mydomain.local" - - # Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN - # above. If this parameter is not set, TheHive uses 'domainFQDN'. - #serverNames = [ad1.mydomain.local, ad2.mydomain.local] - - # The Windows domain name using short format. This parameter is required. - #domainName = "MYDOMAIN" - - # If 'true', use SSL to connect to the domain controller. - #useSSL = true - } - - ldap { - # The LDAP server name or address. The port can be specified using the 'host:port' - # syntax. This parameter is required if you don't use 'serverNames' below. - #serverName = "ldap.mydomain.local:389" - - # If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead. - #serverNames = [ldap1.mydomain.local, ldap2.mydomain.local] - - # Account to use to bind to the LDAP server. This parameter is required. - #bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local" - - # Password of the binding account. This parameter is required. - #bindPW = "***secret*password***" - - # Base DN to search users. This parameter is required. - #baseDN = "ou=users,dc=mydomain,dc=local" - - # Filter to search user in the directory server. Please note that {0} is replaced - # by the actual user name. This parameter is required. - #filter = "(cn={0})" - - # If 'true', use SSL to connect to the LDAP directory server. - #useSSL = true - } -} - -# Maximum time between two requests without requesting authentication -session { - warning = 5m - inactivity = 1h -} - -# Max textual content length -play.http.parser.maxMemoryBuffer= 1M -# Max file size -play.http.parser.maxDiskBuffer = 1G - -# Cortex -# TheHive can connect to one or multiple Cortex instances. 
Give each -# Cortex instance a name and specify the associated URL. -# -# In order to use Cortex, first you need to enable the Cortex module by uncommenting the next line - -play.modules.enabled += connectors.cortex.CortexConnector - -cortex { - "CORTEX-SERVER-ID" { - url = "http://{{ MANAGERIP }}:9001/cortex/" - key = "{{ CORTEXKEY }}" - # # HTTP client configuration (SSL and proxy) - # ws {} - } -} - -# MISP -# TheHive can connect to one or multiple MISP instances. Give each MISP -# instance a name and specify the associated Authkey that must be used -# to poll events, the case template that should be used by default when -# importing events as well as the tags that must be added to cases upon -# import. - -# Prior to configuring the integration with a MISP instance, you must -# enable the MISP connector. This will allow you to import events to -# and/or export cases to the MISP instance(s). - -#play.modules.enabled += connectors.misp.MispConnector - -misp { - # Interval between consecutive MISP event imports in hours (h) or - # minutes (m). - interval = 1h - - #"MISP-SERVER-ID" { - # # MISP connection configuration requires at least an url and a key. The key must - # # be linked with a sync account on MISP. - # url = "" - # key = "" - # - # # Name of the case template in TheHive that shall be used to import - # # MISP events as cases by default. - # caseTemplate = "" - # - # # Optional tags to add to each observable imported from an event - # # available on this instance. - # tags = ["misp-server-id"] - # - # ## MISP event filters - # # MISP filters is used to exclude events from the import. 
- # # Filter criteria are: - # # The number of attribute - # max-attributes = 1000 - # # The size of its JSON representation - # max-size = 1 MiB - # # The age of the last publish date - # max-age = 7 days - # # Organization and tags - # exclusion { - # organisation = ["bad organisation", "other organisations"] - # tags = ["tag1", "tag2"] - # } - # - # ## HTTP client configuration (SSL and proxy) - # # Truststore to use to validate the X.509 certificate of the MISP - # # instance if the default truststore is not sufficient. - # # Proxy can also be used - # ws { - # ssl.trustManager.stores = [ { - # path = /path/to/truststore.jks - # } ] - # proxy { - # host = proxy.mydomain.org - # port = 3128 - # } - # } - # - # # MISP purpose defines if this instance can be used to import events (ImportOnly), export cases (ExportOnly) or both (ImportAndExport) - # # Default is ImportAndExport - # purpose = ImportAndExport - #} ## <-- Uncomment to complete the configuration -} -webhooks { - NodeRedWebHook { - url = "http://{{ MANAGERIP }}:1880/thehive" - } - #SOCtopusWebHook { - # url = "http://{{ MANAGERIP }}:7000/enrich" - #} -} diff --git a/salt/thehive/etc/cortex-application.conf b/salt/thehive/etc/cortex-application.conf deleted file mode 100644 index 88bea88df..000000000 --- a/salt/thehive/etc/cortex-application.conf +++ /dev/null @@ -1,148 +0,0 @@ -{%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} -{%- set CORTEXPLAYSECRET = salt['pillar.get']('global:cortexplaysecret', '') %} - -# Secret Key -# The secret key is used to secure cryptographic functions. -# WARNING: If you deploy your application on several servers, make sure to use the same key. 
-play.http.secret.key="{{ CORTEXPLAYSECRET }}" -play.http.context=/cortex/ -pidfile.path = "/dev/null" -search.uri = "http://{{ MANAGERIP }}:9400" - -# Elasticsearch -search { - # Name of the index - index = cortex - # Name of the Elasticsearch cluster - cluster = thehive - # Address of the Elasticsearch instance - host = ["{{ MANAGERIP }}:9500"] - # Scroll keepalive - keepalive = 1m - # Size of the page for scroll - pagesize = 50 - # Number of shards - nbshards = 5 - # Number of replicas - nbreplicas = 0 - # Arbitrary settings - settings { - # Maximum number of nested fields - mapping.nested_fields.limit = 100 - } - - ## Authentication configuration - #search.username = "" - #search.password = "" - - ## SSL configuration - #search.keyStore { - # path = "/path/to/keystore" - # type = "JKS" # or PKCS12 - # password = "keystore-password" - #} - #search.trustStore { - # path = "/path/to/trustStore" - # type = "JKS" # or PKCS12 - # password = "trustStore-password" - #} -} - -## Cache -# -# If an analyzer is executed against the same observable, the previous report can be returned without re-executing the -# analyzer. The cache is used only if the second job occurs within cache.job (the default is 10 minutes). -cache.job = 10 minutes - -## Authentication -auth { - # "provider" parameter contains the authentication provider(s). It can be multi-valued, which is useful - # for migration. - # The available auth types are: - # - services.LocalAuthSrv : passwords are stored in the user entity within ElasticSearch). No - # configuration are required. - # - ad : use ActiveDirectory to authenticate users. The associated configuration shall be done in - # the "ad" section below. - # - ldap : use LDAP to authenticate users. The associated configuration shall be done in the - # "ldap" section below. - provider = [local] - - ad { - # The Windows domain name in DNS format. This parameter is required if you do not use - # 'serverNames' below. 
- #domainFQDN = "mydomain.local" - - # Optionally you can specify the host names of the domain controllers instead of using 'domainFQDN - # above. If this parameter is not set, TheHive uses 'domainFQDN'. - #serverNames = [ad1.mydomain.local, ad2.mydomain.local] - - # The Windows domain name using short format. This parameter is required. - #domainName = "MYDOMAIN" - - # If 'true', use SSL to connect to the domain controller. - #useSSL = true - } - - ldap { - # The LDAP server name or address. The port can be specified using the 'host:port' - # syntax. This parameter is required if you don't use 'serverNames' below. - #serverName = "ldap.mydomain.local:389" - - # If you have multiple LDAP servers, use the multi-valued setting 'serverNames' instead. - #serverNames = [ldap1.mydomain.local, ldap2.mydomain.local] - - # Account to use to bind to the LDAP server. This parameter is required. - #bindDN = "cn=thehive,ou=services,dc=mydomain,dc=local" - - # Password of the binding account. This parameter is required. - #bindPW = "***secret*password***" - - # Base DN to search users. This parameter is required. - #baseDN = "ou=users,dc=mydomain,dc=local" - - # Filter to search user in the directory server. Please note that {0} is replaced - # by the actual user name. This parameter is required. - #filter = "(cn={0})" - - # If 'true', use SSL to connect to the LDAP directory server. - #useSSL = true - } -} - -## ANALYZERS -# -analyzer { - # Absolute path where you have pulled the Cortex-Analyzers repository. - path = ["/Cortex-Analyzers/analyzers"] - - # Sane defaults. Do not change unless you know what you are doing. - fork-join-executor { - - # Min number of threads available for analysis. - parallelism-min = 2 - - # Parallelism (threads) ... ceil(available processors * factor). - parallelism-factor = 2.0 - - # Max number of threads available for analysis. 
- parallelism-max = 4 - } -} - -## RESPONDERS -## -responder { - # Directory that holds responders - urls = ["/Cortex-Analyzers/responders", "/custom-responders"] - - fork-join-executor { - # Min number of threads available for analyze - parallelism-min = 2 - # Parallelism (threads) ... ceil(available processors * factor) - parallelism-factor = 2.0 - # Max number of threads available for analyze - parallelism-max = 4 - } -} - -# It's the end my friend. Happy hunting! diff --git a/salt/thehive/etc/es/elasticsearch.yml b/salt/thehive/etc/es/elasticsearch.yml deleted file mode 100644 index 8abeb2d93..000000000 --- a/salt/thehive/etc/es/elasticsearch.yml +++ /dev/null @@ -1,17 +0,0 @@ -cluster.name: thehive -network.host: 0.0.0.0 -discovery.zen.minimum_master_nodes: 1 -# This is a test -- if this is here, then the volume is mounted correctly. -path.logs: /var/log/elasticsearch -action.destructive_requires_name: true -discovery.type: single-node -script.allowed_types: inline -transport.bind_host: 0.0.0.0 -transport.publish_host: 0.0.0.0 -transport.publish_port: 9500 -http.host: 0.0.0.0 -http.port: 9400 -transport.tcp.port: 9500 -transport.host: 0.0.0.0 -thread_pool.search.queue_size: 100000 -thread_pool.write.queue_size: 100000 diff --git a/salt/thehive/etc/es/log4j2.properties b/salt/thehive/etc/es/log4j2.properties deleted file mode 100644 index 85cf5d8fb..000000000 --- a/salt/thehive/etc/es/log4j2.properties +++ /dev/null @@ -1,20 +0,0 @@ -status = error -#appender.console.type = Console -#appender.console.name = console -#appender.console.layout.type = PatternLayout -#appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n -#rootLogger.level = info -#rootLogger.appenderRef.console.ref = console -# This is a test -- if this here, then the volume is mounted correctly. 
-appender.rolling.type = RollingFile -appender.rolling.name = rolling -appender.rolling.fileName = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}.log -appender.rolling.layout.type = PatternLayout -appender.rolling.layout.pattern = [%d{ISO8601}][%-5p][%-25c] %.10000m%n -appender.rolling.filePattern = ${sys:es.logs.base_path}${sys:file.separator}${sys:es.logs.cluster_name}-%d{yyyy-MM-dd}.log -appender.rolling.policies.type = Policies -appender.rolling.policies.time.type = TimeBasedTriggeringPolicy -appender.rolling.policies.time.interval = 1 -appender.rolling.policies.time.modulate = true -rootLogger.level = info -rootLogger.appenderRef.rolling.ref = rolling diff --git a/salt/thehive/init.sls b/salt/thehive/init.sls deleted file mode 100644 index 7cbe34c90..000000000 --- a/salt/thehive/init.sls +++ /dev/null @@ -1,180 +0,0 @@ -{% from 'allowed_states.map.jinja' import allowed_states %} -{% if sls in allowed_states %} - -{% set MANAGERIP = salt['pillar.get']('manager:mainip', '') %} -{% set VERSION = salt['pillar.get']('global:soversion', 'HH1.2.2') %} -{% set IMAGEREPO = salt['pillar.get']('global:imagerepo') %} -{% set MANAGER = salt['grains.get']('master') %} -thehiveconfdir: - file.directory: - - name: /opt/so/conf/thehive/etc - - makedirs: True - - user: 939 - - group: 939 - -thehivelogdir: - file.directory: - - name: /opt/so/log/thehive - - makedirs: True - - user: 939 - - group: 939 - -thehiveconf: - file.recurse: - - name: /opt/so/conf/thehive/etc - - source: salt://thehive/etc - - user: 939 - - group: 939 - - template: jinja - -cortexconfdir: - file.directory: - - name: /opt/so/conf/cortex - - makedirs: True - - user: 939 - - group: 939 - -cortexlogdir: - file.directory: - - name: /opt/so/log/cortex - - makedirs: True - - user: 939 - - group: 939 - -cortexconf: - file.recurse: - - name: /opt/so/conf/cortex - - source: salt://thehive/etc - - user: 939 - - group: 939 - - template: jinja - -cortexanalyzers: - file.directory: - - name: 
/opt/so/conf/cortex/custom-analyzers - - user: 939 - - group: 939 - - template: jinja - -cortexresponders: - file.directory: - - name: /opt/so/conf/cortex/custom-responders - - user: 939 - - group: 939 - - template: jinja - -# Install Elasticsearch - -# Made directory for ES data to live in -thehiveesdata: - file.directory: - - name: /nsm/thehive/esdata - - makedirs: True - - user: 939 - - group: 939 - -thehive_elasticsearch_yml: - file.exists: - - name: /opt/so/conf/thehive/etc/es/elasticsearch.yml - -log4j2_properties: - file.exists: - - name: /opt/so/conf/thehive/etc/es/log4j2.properties - -so-thehive-es: - docker_container.running: - - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-es:{{ VERSION }} - - hostname: so-thehive-es - - name: so-thehive-es - - user: 939 - - interactive: True - - tty: True - - binds: - - /nsm/thehive/esdata:/usr/share/elasticsearch/data:rw - - /opt/so/conf/thehive/etc/es/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml:ro - - /opt/so/conf/thehive/etc/es/log4j2.properties:/usr/share/elasticsearch/config/log4j2.properties:ro - - /opt/so/log/thehive:/var/log/elasticsearch:rw - - environment: - - ES_JAVA_OPTS=-Xms512m -Xmx512m -Dlog4j2.formatMsgNoLookups=true - - port_bindings: - - 0.0.0.0:9400:9400 - - 0.0.0.0:9500:9500 - - require: - - file: thehive_elasticsearch_yml - - file: log4j2_properties - -append_so-thehive-es_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-thehive-es - -cortex_application_conf: - file.exists: - - name: /opt/so/conf/thehive/etc/cortex-application.conf - -application_conf: - file.exists: - - name: /opt/so/conf/thehive/etc/application.conf - -# Install Cortex -so-cortex: - docker_container.running: - - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive-cortex:{{ VERSION }} - - hostname: so-cortex - - name: so-cortex - - user: 939 - - binds: - - /opt/so/conf/thehive/etc/cortex-application.conf:/opt/cortex/conf/application.conf:ro - - 
/opt/so/conf/cortex/custom-analyzers:/custom-analyzers:ro - - /opt/so/conf/cortex/custom-responders:/custom-responders:ro - - port_bindings: - - 0.0.0.0:9001:9001 - - require: - - file: cortex_application_conf - -append_so-cortex_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-cortex - -cortexscript: - cmd.script: - - source: salt://thehive/scripts/cortex_init - - cwd: /opt/so - - template: jinja - - hide_output: False - -so-thehive: - docker_container.running: - - image: {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-thehive:{{ VERSION }} - - environment: - - ELASTICSEARCH_HOST={{ MANAGERIP }} - - hostname: so-thehive - - name: so-thehive - - user: 939 - - binds: - - /opt/so/conf/thehive/etc/application.conf:/opt/thehive/conf/application.conf:ro - - port_bindings: - - 0.0.0.0:9000:9000 - - require: - - file: application_conf - -append_so-thehive_so-status.conf: - file.append: - - name: /opt/so/conf/so-status/so-status.conf - - text: so-thehive - -thehivescript: - cmd.script: - - source: salt://thehive/scripts/hive_init - - cwd: /opt/so - - template: jinja - - hide_output: False - -{% else %} - -{{sls}}_state_not_allowed: - test.fail_without_changes: - - name: {{sls}}_state_not_allowed - -{% endif %} diff --git a/salt/thehive/scripts/cortex_init b/salt/thehive/scripts/cortex_init deleted file mode 100644 index fd0387131..000000000 --- a/salt/thehive/scripts/cortex_init +++ /dev/null @@ -1,75 +0,0 @@ -#!/bin/bash -# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} -# {%- set CORTEXUSER = salt['pillar.get']('global:cortexuser', 'cortexadmin') %} -# {%- set CORTEXPASSWORD = salt['pillar.get']('global:cortexpassword', 'cortexchangeme') %} -# {%- set CORTEXKEY = salt['pillar.get']('global:cortexkey', '') %} -# {%- set CORTEXORGNAME = salt['pillar.get']('global:cortexorgname', '') %} -# {%- set CORTEXORGUSER = salt['pillar.get']('global:cortexorguser', 'soadmin') %} -# {%- set CORTEXORGUSERKEY = 
salt['pillar.get']('global:cortexorguserkey', '') %} - -. /usr/sbin/so-common - -default_salt_dir=/opt/so/saltstack/default - -cortex_clean(){ - sed -i '/^ cortexuser:/d' /opt/so/saltstack/local/pillar/global.sls - sed -i '/^ cortexpassword:/d' /opt/so/saltstack/local/pillar/global.sls - sed -i '/^ cortexorguser:/d' /opt/so/saltstack/local/pillar/global.sls -} - -cortex_init(){ - CORTEX_URL="http://{{MANAGERIP}}:9001/cortex/" - CORTEX_API_URL="${CORTEX_URL}api" - CORTEX_USER="{{CORTEXUSER}}" - CORTEX_PASSWORD="{{CORTEXPASSWORD}}" - CORTEX_KEY="{{CORTEXKEY}}" - CORTEX_ORG_NAME="{{CORTEXORGNAME}}" - CORTEX_ORG_DESC="{{CORTEXORGNAME}} organization created by Security Onion setup" - CORTEX_ORG_USER="{{CORTEXORGUSER}}" - CORTEX_ORG_USER_KEY="{{CORTEXORGUSERKEY}}" - SOCTOPUS_CONFIG="$default_salt_dir/salt/soctopus/files/SOCtopus.conf" - - if wait_for_web_response $CORTEX_URL "Cortex" 120; then - # Migrate DB - curl -sk -XPOST -L "$CORTEX_API_URL/maintenance/migrate" - - # Create intial Cortex superadmin - curl -sk -L "$CORTEX_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$CORTEX_USER\",\"name\" : \"$CORTEX_USER\",\"roles\" : [\"superadmin\"],\"preferences\" : \"{}\",\"password\" : \"$CORTEX_PASSWORD\", \"key\": \"$CORTEX_KEY\"}" - - # Create user-supplied org - curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/organization" -d "{ \"name\": \"$CORTEX_ORG_NAME\",\"description\": \"$CORTEX_ORG_DESC\",\"status\": \"Active\"}" - - # Create user-supplied org user - curl -sk -XPOST -H "Authorization: Bearer $CORTEX_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/user" -d "{\"name\": \"$CORTEX_ORG_USER\",\"roles\": [\"read\",\"analyze\",\"orgadmin\"],\"organization\": \"$CORTEX_ORG_NAME\",\"login\": \"$CORTEX_ORG_USER\",\"key\": \"$CORTEX_ORG_USER_KEY\" }" - - # Enable URLScan.io Analyzer - curl -sv -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: 
application/json" -L "$CORTEX_API_URL/organization/analyzer/Urlscan_io_Search_0_1_0" -d '{"name":"Urlscan_io_Search_0_1_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2}}' - - # Enable Cert PassiveDNS Analyzer - curl -sv -k -XPOST -H "Authorization: Bearer $CORTEX_ORG_USER_KEY" -H "Content-Type: application/json" -L "$CORTEX_API_URL/organization/analyzer/CERTatPassiveDNS_2_0" -d '{"name":"CERTatPassiveDNS_2_0","configuration":{"auto_extract_artifacts":false,"check_tlp":true,"max_tlp":2, "limit": 100}}' - - # Revoke $CORTEX_USER key - curl -sk -XDELETE -H "Authorization: Bearer $CORTEX_KEY" -L "$CORTEX_API_URL/user/$CORTEX_USER/key" - - # Update SOCtopus config with apikey value - #sed -i "s/cortex_key = .*/cortex_key = $CORTEX_KEY/" $SOCTOPUS_CONFIG - - touch /opt/so/state/cortex.txt - else - echo "We experienced an issue connecting to Cortex!" - exit 1 - fi -} - -if [ -f /opt/so/state/cortex.txt ]; then - cortex_clean - exit 0 -else - if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"' 120; then - cortex_init - cortex_clean - else - echo "TheHive Elasticsearch server is not ready; unable to proceed with Cortex init." - exit 1 - fi -fi diff --git a/salt/thehive/scripts/hive_init b/salt/thehive/scripts/hive_init deleted file mode 100755 index 7ace6137b..000000000 --- a/salt/thehive/scripts/hive_init +++ /dev/null @@ -1,53 +0,0 @@ -#!/bin/bash -# {%- set MANAGERIP = salt['pillar.get']('global:managerip', '') %} -# {%- set THEHIVEUSER = salt['pillar.get']('global:hiveuser', 'hiveadmin') %} -# {%- set THEHIVEPASSWORD = salt['pillar.get']('global:hivepassword', 'hivechangeme') %} -# {%- set THEHIVEKEY = salt['pillar.get']('global:hivekey', '') %} - -. 
/usr/sbin/so-common - -thehive_clean(){ - sed -i '/^ hiveuser:/d' /opt/so/saltstack/local/pillar/global.sls - sed -i '/^ hivepassword:/d' /opt/so/saltstack/local/pillar/global.sls -} - -thehive_init(){ - THEHIVE_URL="http://{{MANAGERIP}}:9000/thehive/" - THEHIVE_API_URL="${THEHIVE_URL}api" - THEHIVE_USER="{{THEHIVEUSER}}" - THEHIVE_PASSWORD="{{THEHIVEPASSWORD}}" - THEHIVE_KEY="{{THEHIVEKEY}}" - SOCTOPUS_CONFIG="/opt/so/saltstack/salt/soctopus/files/SOCtopus.conf" - - echo -n "Waiting for TheHive..." - if wait_for_web_response $THEHIVE_URL "TheHive" 120; then - # Migrate DB - curl -sk -XPOST -L "$THEHIVE_API_URL/maintenance/migrate" - - # Create intial TheHive user - curl -sk -L "$THEHIVE_API_URL/user" -H "Content-Type: application/json" -d "{\"login\" : \"$THEHIVE_USER\",\"name\" : \"$THEHIVE_USER\",\"roles\" : [\"read\",\"alert\",\"write\",\"admin\"],\"preferences\" : \"{}\",\"password\" : \"$THEHIVE_PASSWORD\", \"key\": \"$THEHIVE_KEY\"}" - - # Pre-load custom fields - # - # reputation - curl -sk -L "$THEHIVE_API_URL/list/custom_fields" -H "Authorization: Bearer $THEHIVE_KEY" -H "Content-Type: application/json" -d "{\"value\":{\"name\": \"reputation\", \"reference\": \"reputation\", \"description\": \"This field provides an overall reputation status for an address/domain.\", \"type\": \"string\", \"options\": []}}" - - touch /opt/so/state/thehive.txt - else - echo "We experienced an issue connecting to TheHive!" - exit 1 - fi -} - -if [ -f /opt/so/state/thehive.txt ]; then - thehive_clean - exit 0 -else - if wait_for_web_response http://{{MANAGERIP}}:9400/_cluster/health '"status":"green"' 120; then - thehive_init - thehive_clean - else - echo "TheHive Elasticsearch server is not ready; unable to proceed with TheHive init." 
- exit 1 - fi -fi diff --git a/salt/top.sls b/salt/top.sls index c20bf33b7..87f96143f 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -1,6 +1,5 @@ {% set ZEEKVER = salt['pillar.get']('global:mdengine', '') %} {% set WAZUH = salt['pillar.get']('global:wazuh', '0') %} -{% set THEHIVE = salt['pillar.get']('manager:thehive', '0') %} {% set PLAYBOOK = salt['pillar.get']('manager:playbook', '0') %} {% set FREQSERVER = salt['pillar.get']('manager:freq', '0') %} {% set DOMAINSTATS = salt['pillar.get']('manager:domainstats', '0') %} @@ -35,11 +34,14 @@ base: '* and G@saltversion:{{saltversion}}': - match: compound - salt.minion - - common - patch.os.schedule - motd - salt.minion-check - salt.lasthighstate + + 'not *_workstation and G@saltversion:{{saltversion}}': + - match: compound + - common '*_helixsensor and G@saltversion:{{saltversion}}': - match: compound @@ -142,9 +144,6 @@ base: - utility - schedule - soctopus - {%- if THEHIVE != 0 %} - - thehive - {%- endif %} {%- if PLAYBOOK != 0 %} - playbook - redis @@ -209,9 +208,6 @@ base: - fleet.install_package {%- endif %} - soctopus - {%- if THEHIVE != 0 %} - - thehive - {%- endif %} {%- if PLAYBOOK != 0 %} - playbook {%- endif %} @@ -283,9 +279,6 @@ base: - utility - schedule - soctopus - {%- if THEHIVE != 0 %} - - thehive - {%- endif %} {%- if PLAYBOOK != 0 %} - playbook {%- endif %} @@ -375,9 +368,6 @@ base: - fleet.install_package {%- endif %} - soctopus - {%- if THEHIVE != 0 %} - - thehive - {%- endif %} {%- if PLAYBOOK != 0 %} - playbook {%- endif %} @@ -519,3 +509,11 @@ base: - docker_clean - filebeat - idh + + 'J@workstation:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:CentOS )': + - match: compound + - workstation + + 'J@workstation:gui:enabled:^[Ff][Aa][Ll][Ss][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:CentOS )': + - match: compound + - workstation.remove_gui diff --git a/salt/workstation/init.sls b/salt/workstation/init.sls new file mode 100644 index 000000000..e270c0430 
--- /dev/null +++ b/salt/workstation/init.sls @@ -0,0 +1,7 @@ +include: + - workstation.xwindows +{# If the master is 'salt' then the minion hasn't been configured and isn't connected to the grid. #} +{# We need this since the trusted-ca state uses mine data. #} +{% if grains.master != 'salt' %} + - workstation.trusted-ca +{% endif %} diff --git a/salt/workstation/packages.sls b/salt/workstation/packages.sls new file mode 100644 index 000000000..59b24ec2a --- /dev/null +++ b/salt/workstation/packages.sls @@ -0,0 +1,60 @@ +{# we only want this state to run it is CentOS #} +{% if grains.os == 'CentOS' %} + +xwindows_group: + pkg.group_installed: + - name: X Window System + +graphical_extras: + pkg.installed: + - pkgs: + - gnome-classic-session + - gnome-terminal + - gnome-terminal-nautilus + - control-center + - liberation-mono-fonts + - file-roller + +workstation_packages: + pkg.installed: + - pkgs: + - wget + - curl + - unzip + - gedit + - mono-core + - mono-basic + - mono-winforms + - expect + - wireshark-gnome + - dsniff + - hping3 + - netsed + - ngrep + - python36-scapy + - ssldump + - tcpdump + - tcpflow + - whois + - chromium + - libevent + - sslsplit + - perl-IO-Compress + - perl-Net-DNS + - securityonion-networkminer + - securityonion-chaosreader + - securityonion-analyst-extras + - securityonion-bittwist + - securityonion-tcpstat + - securityonion-tcptrace + - securityonion-foremost + - securityonion-strelka-oneshot + - securityonion-strelka-fileshot + +{% else %} + +workstation_packages_os_fail: + test.fail_without_changes: + - comment: 'SO Analyst Workstation can only be installed on CentOS' + +{% endif %} diff --git a/salt/workstation/remove_gui.sls b/salt/workstation/remove_gui.sls new file mode 100644 index 000000000..097e23151 --- /dev/null +++ b/salt/workstation/remove_gui.sls @@ -0,0 +1,15 @@ +{# we only want this state to run it is CentOS #} +{% if grains.os == 'CentOS' %} + +remove_graphical_target: + file.symlink: + - name: 
/etc/systemd/system/default.target + - target: /lib/systemd/system/multi-user.target + - force: True + +{% else %} +workstation_trusted-ca_os_fail: + test.fail_without_changes: + - comment: 'SO Analyst Workstation can only be installed on CentOS' + +{% endif %} diff --git a/salt/workstation/trusted-ca.sls b/salt/workstation/trusted-ca.sls new file mode 100644 index 000000000..008d3573f --- /dev/null +++ b/salt/workstation/trusted-ca.sls @@ -0,0 +1,35 @@ + +{# we only want this state to run it is CentOS #} +{% if grains.os == 'CentOS' %} + + {% set global_ca_text = [] %} + {% set global_ca_server = [] %} + {% set manager = salt['grains.get']('master') %} + {% set x509dict = salt['mine.get'](manager | lower~'*', 'x509.get_pem_entries') %} + {% for host in x509dict %} + {% if host.split('_')|last in ['manager', 'managersearch', 'standalone', 'import', 'eval'] %} + {% do global_ca_text.append(x509dict[host].get('/etc/pki/ca.crt')|replace('\n', '')) %} + {% do global_ca_server.append(host) %} + {% endif %} + {% endfor %} + {% set trusttheca_text = global_ca_text[0] %} + {% set ca_server = global_ca_server[0] %} + +trusted_ca: + x509.pem_managed: + - name: /etc/pki/ca-trust/source/anchors/ca.crt + - text: {{ trusttheca_text }} + +update_ca_certs: + cmd.run: + - name: update-ca-trust + - onchanges: + - x509: trusted_ca + +{% else %} + +workstation_trusted-ca_os_fail: + test.fail_without_changes: + - comment: 'SO Analyst Workstation can only be installed on CentOS' + +{% endif %} diff --git a/salt/workstation/xwindows.sls b/salt/workstation/xwindows.sls new file mode 100644 index 000000000..015fb0d3c --- /dev/null +++ b/salt/workstation/xwindows.sls @@ -0,0 +1,23 @@ + +{# we only want this state to run it is CentOS #} +{% if grains.os == 'CentOS' %} + +include: + - workstation.packages + +graphical_target: + file.symlink: + - name: /etc/systemd/system/default.target + - target: /lib/systemd/system/graphical.target + - force: True + - require: + - pkg: X Window System + - 
pkg: graphical_extras + +{% else %} + +workstation_xwindows_os_fail: + test.fail_without_changes: + - comment: 'SO Analyst Workstation can only be installed on CentOS' + +{% endif %} diff --git a/salt/zeek/policy/securityonion/file-extraction/extract.zeek b/salt/zeek/policy/securityonion/file-extraction/extract.zeek index e5b7db864..8cdaf42dd 100644 --- a/salt/zeek/policy/securityonion/file-extraction/extract.zeek +++ b/salt/zeek/policy/securityonion/file-extraction/extract.zeek @@ -38,12 +38,12 @@ event file_state_remove(f: fa_file) if ( !f$info?$extracted || FileExtract::prefix == "" ) { return; } - # Check some conditions so we know the file is intact: - # Check for MD5 - # Check for total_bytes - # Check for missing bytes - # Check if timed out - if ( !f$info?$md5 || !f?$total_bytes || f$missing_bytes > 0 || f$info$timedout) { + # Check if any of the following conditions exist: + # - missing MD5 + # - total_bytes exists (some protocols aren't populating this field) but is 0 + # - missing bytes + # - timed out + if ( !f$info?$md5 || (f?$total_bytes && f$total_bytes == 0) || f$missing_bytes > 0 || f$info$timedout) { # Delete the file if it didn't pass our requirements check. 
local nuke = fmt("rm %s/%s", FileExtract::prefix, f$info$extracted); diff --git a/setup/automation/distributed-airgap-manager b/setup/automation/distributed-airgap-manager index 66765c38a..b5d30ae33 100644 --- a/setup/automation/distributed-airgap-manager +++ b/setup/automation/distributed-airgap-manager @@ -70,7 +70,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-airgap-search b/setup/automation/distributed-airgap-search index 578bd0c03..3afc48d3b 100644 --- a/setup/automation/distributed-airgap-search +++ b/setup/automation/distributed-airgap-search @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-airgap-sensor b/setup/automation/distributed-airgap-sensor index 597cbd1d5..a96cbeb7d 100644 --- a/setup/automation/distributed-airgap-sensor +++ b/setup/automation/distributed-airgap-sensor @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-cloud-manager b/setup/automation/distributed-cloud-manager index c7f295c3b..8e298e4c2 100644 --- a/setup/automation/distributed-cloud-manager +++ b/setup/automation/distributed-cloud-manager @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-cloud-search b/setup/automation/distributed-cloud-search index f6717bef7..aabf24a7f 100644 --- a/setup/automation/distributed-cloud-search +++ b/setup/automation/distributed-cloud-search @@ -71,7 
+71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser #STRELKA=1 -#THEHIVE=1 +#THEHIVE=0 #WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-cloud-sensor b/setup/automation/distributed-cloud-sensor index f033b80cb..0ba42769c 100644 --- a/setup/automation/distributed-cloud-sensor +++ b/setup/automation/distributed-cloud-sensor @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser #STRELKA=1 -#THEHIVE=1 +#THEHIVE=0 #WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-iso-manager b/setup/automation/distributed-iso-manager index 137361bcb..bd1aec7b4 100644 --- a/setup/automation/distributed-iso-manager +++ b/setup/automation/distributed-iso-manager @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-iso-search b/setup/automation/distributed-iso-search index e026dc862..9bdeaaa34 100644 --- a/setup/automation/distributed-iso-search +++ b/setup/automation/distributed-iso-search @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-iso-sensor b/setup/automation/distributed-iso-sensor index b74ad1daf..90f17ffb5 100644 --- a/setup/automation/distributed-iso-sensor +++ b/setup/automation/distributed-iso-sensor @@ -72,7 +72,7 @@ SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 SURIPINS=(2 3) -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-centos-manager b/setup/automation/distributed-net-centos-manager index 137361bcb..bd1aec7b4 100644 --- 
a/setup/automation/distributed-net-centos-manager +++ b/setup/automation/distributed-net-centos-manager @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-centos-search b/setup/automation/distributed-net-centos-search index ac02110ab..98c0af7c8 100644 --- a/setup/automation/distributed-net-centos-search +++ b/setup/automation/distributed-net-centos-search @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-centos-sensor b/setup/automation/distributed-net-centos-sensor index 7e2fec4d9..f8230152e 100644 --- a/setup/automation/distributed-net-centos-sensor +++ b/setup/automation/distributed-net-centos-sensor @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-ubuntu-manager b/setup/automation/distributed-net-ubuntu-manager index 9f38f3406..c7ffd9ebe 100644 --- a/setup/automation/distributed-net-ubuntu-manager +++ b/setup/automation/distributed-net-ubuntu-manager @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-ubuntu-search b/setup/automation/distributed-net-ubuntu-search index 25cdabf2d..5285f97e3 100644 --- a/setup/automation/distributed-net-ubuntu-search +++ b/setup/automation/distributed-net-ubuntu-search @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # 
WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-ubuntu-sensor b/setup/automation/distributed-net-ubuntu-sensor index 321d53a7b..294b68480 100644 --- a/setup/automation/distributed-net-ubuntu-sensor +++ b/setup/automation/distributed-net-ubuntu-sensor @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-ubuntu-suricata-manager b/setup/automation/distributed-net-ubuntu-suricata-manager index 0c967faf2..e5c0c137f 100644 --- a/setup/automation/distributed-net-ubuntu-suricata-manager +++ b/setup/automation/distributed-net-ubuntu-suricata-manager @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-ubuntu-suricata-search b/setup/automation/distributed-net-ubuntu-suricata-search index 163e46faf..585de54af 100644 --- a/setup/automation/distributed-net-ubuntu-suricata-search +++ b/setup/automation/distributed-net-ubuntu-suricata-search @@ -71,7 +71,7 @@ PATCHSCHEDULENAME=auto SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/distributed-net-ubuntu-suricata-sensor b/setup/automation/distributed-net-ubuntu-suricata-sensor index 646daf471..ee8eba5e0 100644 --- a/setup/automation/distributed-net-ubuntu-suricata-sensor +++ b/setup/automation/distributed-net-ubuntu-suricata-sensor @@ -72,7 +72,7 @@ SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser # STRELKA=1 SURIPINS=(2 3) -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 # WEBUSER=onionuser@somewhere.invalid # WEBPASSWD1=0n10nus3r diff --git a/setup/automation/eval-airgap 
b/setup/automation/eval-airgap index 62209e93f..7e1df4dfc 100644 --- a/setup/automation/eval-airgap +++ b/setup/automation/eval-airgap @@ -70,7 +70,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/eval-cloud b/setup/automation/eval-cloud index fbd3ab3a4..cb8b0b1ae 100644 --- a/setup/automation/eval-cloud +++ b/setup/automation/eval-cloud @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/eval-iso b/setup/automation/eval-iso index fab65f4d0..e1461d95f 100644 --- a/setup/automation/eval-iso +++ b/setup/automation/eval-iso @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/eval-net-centos b/setup/automation/eval-net-centos index 3800f8d68..c86357a21 100644 --- a/setup/automation/eval-net-centos +++ b/setup/automation/eval-net-centos @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/eval-net-ubuntu b/setup/automation/eval-net-ubuntu index c7353e545..5d1cfb500 100644 --- a/setup/automation/eval-net-ubuntu +++ b/setup/automation/eval-net-ubuntu @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/import-airgap b/setup/automation/import-airgap index 47076634c..78cd42096 100644 --- a/setup/automation/import-airgap +++ b/setup/automation/import-airgap @@ -70,7 +70,7 @@ RULESETUP=ETOPEN # 
SOREMOTEPASS1=onionuser # SOREMOTEPASS2=onionuser STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/import-cloud b/setup/automation/import-cloud index 9ba4cf2a9..eb8b23905 100644 --- a/setup/automation/import-cloud +++ b/setup/automation/import-cloud @@ -69,7 +69,7 @@ RULESETUP=ETOPEN # SOREMOTEPASS1=onionuser # SOREMOTEPASS2=onionuser STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/import-iso b/setup/automation/import-iso index 7dc1688d9..8c8357f0f 100644 --- a/setup/automation/import-iso +++ b/setup/automation/import-iso @@ -69,7 +69,7 @@ RULESETUP=ETOPEN # SOREMOTEPASS1=onionuser # SOREMOTEPASS2=onionuser STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/import-net-centos b/setup/automation/import-net-centos index d55223625..e565b22e2 100644 --- a/setup/automation/import-net-centos +++ b/setup/automation/import-net-centos @@ -69,7 +69,7 @@ RULESETUP=ETOPEN # SOREMOTEPASS1=onionuser # SOREMOTEPASS2=onionuser STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/import-net-ubuntu b/setup/automation/import-net-ubuntu index 41c6a7c6c..e115232aa 100644 --- a/setup/automation/import-net-ubuntu +++ b/setup/automation/import-net-ubuntu @@ -69,7 +69,7 @@ RULESETUP=ETOPEN # SOREMOTEPASS1=onionuser # SOREMOTEPASS2=onionuser STRELKA=1 -# THEHIVE=1 +# THEHIVE=0 # WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/standalone-airgap b/setup/automation/standalone-airgap index b228c892e..a17d006c7 100644 --- a/setup/automation/standalone-airgap +++ b/setup/automation/standalone-airgap @@ -70,7 +70,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 
WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/standalone-cloud b/setup/automation/standalone-cloud index ab66253d3..77686b862 100644 --- a/setup/automation/standalone-cloud +++ b/setup/automation/standalone-cloud @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/standalone-iso b/setup/automation/standalone-iso index edf6853d4..fa47dd66d 100644 --- a/setup/automation/standalone-iso +++ b/setup/automation/standalone-iso @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/standalone-iso-logscan b/setup/automation/standalone-iso-logscan index 25a42348b..4038735d0 100644 --- a/setup/automation/standalone-iso-logscan +++ b/setup/automation/standalone-iso-logscan @@ -70,7 +70,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/standalone-iso-suricata b/setup/automation/standalone-iso-suricata index 0e3be059f..078190043 100644 --- a/setup/automation/standalone-iso-suricata +++ b/setup/automation/standalone-iso-suricata @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/standalone-net-centos b/setup/automation/standalone-net-centos index 6cc7bbf10..050bdde51 100644 --- a/setup/automation/standalone-net-centos +++ b/setup/automation/standalone-net-centos @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid 
WEBPASSWD1=0n10nus3r diff --git a/setup/automation/standalone-net-centos-proxy b/setup/automation/standalone-net-centos-proxy index 4e180d07b..9f8e1b6b6 100644 --- a/setup/automation/standalone-net-centos-proxy +++ b/setup/automation/standalone-net-centos-proxy @@ -70,7 +70,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/automation/standalone-net-ubuntu b/setup/automation/standalone-net-ubuntu index 45fa01476..2aad4ea0e 100644 --- a/setup/automation/standalone-net-ubuntu +++ b/setup/automation/standalone-net-ubuntu @@ -69,7 +69,7 @@ RULESETUP=ETOPEN SOREMOTEPASS1=onionuser SOREMOTEPASS2=onionuser STRELKA=1 -THEHIVE=1 +THEHIVE=0 WAZUH=1 WEBUSER=onionuser@somewhere.invalid WEBPASSWD1=0n10nus3r diff --git a/setup/so-functions b/setup/so-functions index 2c3d2a649..61c3985e3 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -49,14 +49,14 @@ airgap_repo() { rm -rf /etc/yum.repos.d/* echo "[airgap_repo]" > /etc/yum.repos.d/airgap_repo.repo if $is_manager; then - echo "baseurl=https://$HOSTNAME/repo" >> /etc/yum.repos.d/airgap_repo.repo + echo "baseurl=https://$HOSTNAME/repo" >> /etc/yum.repos.d/airgap_repo.repo else - echo "baseurl=https://$MSRV/repo" >> /etc/yum.repos.d/airgap_repo.repo + echo "baseurl=https://$MSRV/repo" >> /etc/yum.repos.d/airgap_repo.repo fi - echo "gpgcheck=1" >> /etc/yum.repos.d/airgap_repo.repo - echo "sslverify=0" >> /etc/yum.repos.d/airgap_repo.repo - echo "name=Airgap Repo" >> /etc/yum.repos.d/airgap_repo.repo - echo "enabled=1" >> /etc/yum.repos.d/airgap_repo.repo + echo "gpgcheck=1" >> /etc/yum.repos.d/airgap_repo.repo + echo "sslverify=0" >> /etc/yum.repos.d/airgap_repo.repo + echo "name=Airgap Repo" >> /etc/yum.repos.d/airgap_repo.repo + echo "enabled=1" >> /etc/yum.repos.d/airgap_repo.repo } airgap_rules() { @@ -138,6 +138,45 @@ analyze_system() { logCmd "ip a" } +analyst_salt_local() { + + # 
Install everything using local salt + # Set the repo + securityonion_repo + gpg_rpm_import + # Install salt + logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq" + logCmd "yum -y update --exclude=salt*" + + salt-call state.apply workstation --local --file-root=../salt/ -l info 2>&1 | tee -a outfile + read -r -d '' message <<- EOM + Finished Analyst workstation installation. + + Press ENTER to reboot. + EOM + + whiptail --title "$whiptail_title" --msgbox "$message" 12 75 + reboot + exit 0 + +} + + +analyst_workstation_pillar() { + + local pillar_file=$temp_install_dir/pillar/minions/$MINION_ID.sls + + # Create the analyst workstation pillar + printf '%s\n'\ + "host:"\ + " mainint: '$MNIC'"\ + "workstation:"\ + " gui:"\ + " enabled: true" >> "$pillar_file"\ + "sensoroni:"\ + " node_description: '${NODE_DESCRIPTION//\'/''}'" > $pillar_file +} + calculate_useable_cores() { # Calculate reasonable core usage @@ -153,18 +192,6 @@ check_admin_pass() { check_pass_match "$ADMINPASS1" "$ADMINPASS2" "APMATCH" } -check_hive_init() { - - wait_for_file /opt/so/state/thehive.txt 20 5 - local return_val=$? - if [[ $return_val -ne 0 ]]; then - return $return_val - fi - - docker stop so-thehive - docker rm so-thehive -} - check_manager_state() { echo "Checking state of manager services. This may take a moment..." retry 2 15 "__check_so_status" >> $setup_log 2>&1 && retry 2 15 "__check_salt_master" >> $setup_log 2>&1 && return 0 || return 1 @@ -229,36 +256,6 @@ check_service_status() { } -check_salt_master_status() { - local timeout=$1 - echo "Checking if we can talk to the salt master" >> "$setup_log" 2>&1 - salt-call saltutil.kill_all_jobs > /dev/null 2>&1 - salt-call state.show_top -t $timeout > /dev/null 2>&1 - local status=$? 
- if [ $status -gt 0 ]; then - echo " Could not talk to salt master" >> "$setup_log" 2>&1 - return 1; - else - echo " Can talk to salt master" >> "$setup_log" 2>&1 - return 0; - fi - -} - -check_salt_minion_status() { - local timeout=$1 - echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 - salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 - local status=$? - if [ $status -gt 0 ]; then - echo " Minion did not respond" >> "$setup_log" 2>&1 - return 1; - else - echo " Received job response from salt minion" >> "$setup_log" 2>&1 - return 0; - fi -} - check_soremote_pass() { check_pass_match "$SOREMOTEPASS1" "$SOREMOTEPASS2" "SCMATCH" } @@ -447,6 +444,13 @@ collect_hostname_validate() { done } +collect_idh_preferences() { + IDHMGTRESTRICT='False' + whiptail_idh_preferences + + if [[ "$idh_preferences" != "" ]]; then IDHMGTRESTRICT='True'; fi +} + collect_idh_services() { whiptail_idh_services @@ -778,6 +782,9 @@ collect_zeek() { configure_minion() { local minion_type=$1 + if [[ $is_analyst ]]; then + minion_type=workstation + fi echo "Configuring minion type as $minion_type" >> "$setup_log" 2>&1 echo "role: so-$minion_type" > /etc/salt/grains @@ -786,6 +793,9 @@ configure_minion() { echo "id: '$MINION_ID'" > "$minion_config" case "$minion_type" in + 'workstation') + echo "master: '$MSRV'" >> "$minion_config" + ;; 'helix') cp -f ../salt/ca/files/signing_policies.conf /etc/salt/minion.d/signing_policies.conf echo "master: '$HOSTNAME'" >> "$minion_config" @@ -894,6 +904,8 @@ check_requirements() { if [[ -n $nsm_mount ]]; then if [[ "$standalone_or_dist" == 'import' ]]; then req_storage=50 + elif [[ "$node_type" == 'idh' ]]; then + req_storage=12 else req_storage=100 fi @@ -906,6 +918,8 @@ check_requirements() { else if [[ "$standalone_or_dist" == 'import' ]]; then req_storage=50 + elif [[ "$node_type" == 'idh' ]]; then + req_storage=12 else req_storage=200 fi @@ -1120,6 +1134,7 @@ detect_os() { echo "Detecting Base OS" >> "$log" 2>&1 if [ 
-f /etc/redhat-release ]; then OS=centos + is_centos=true if grep -q "CentOS Linux release 7" /etc/redhat-release; then OSVER=7 elif grep -q "CentOS Linux release 8" /etc/redhat-release; then @@ -1219,7 +1234,7 @@ disable_ipv6() { docker_install() { - if [ $OS = 'centos' ]; then + if [[ $is_centos ]]; then logCmd "yum clean expire-cache" if [[ ! $is_iso ]]; then logCmd "yum -y install docker-ce-20.10.5-3.el7 docker-ce-cli-20.10.5-3.el7 docker-ce-rootless-extras-20.10.5-3.el7 containerd.io-1.4.4-3.1.el7" @@ -1241,15 +1256,15 @@ docker_install() { ;; esac if [ $OSVER == "bionic" ]; then - service docker stop + service docker stop apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras retry 50 10 "apt-get -y install --allow-downgrades docker-ce=5:20.10.5~3-0~ubuntu-bionic docker-ce-cli=5:20.10.5~3-0~ubuntu-bionic docker-ce-rootless-extras=5:20.10.5~3-0~ubuntu-bionic python3-docker" >> "$setup_log" 2>&1 || exit 1 - apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras + apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras elif [ $OSVER == "focal" ]; then - service docker stop + service docker stop apt -y purge docker-ce docker-ce-cli docker-ce-rootless-extras retry 50 10 "apt-get -y install --allow-downgrades docker-ce=5:20.10.8~3-0~ubuntu-focal docker-ce-cli=5:20.10.8~3-0~ubuntu-focal docker-ce-rootless-extras=5:20.10.8~3-0~ubuntu-focal python3-docker" >> "$setup_log" 2>&1 || exit 1 - apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras + apt-mark hold docker-ce docker-ce-cli docker-ce-rootless-extras fi fi docker_registry @@ -1380,9 +1395,9 @@ es_heapsize() { # Set heap size to 33% of available memory ES_HEAP_SIZE=$(( total_mem / 3 )) if [ "$ES_HEAP_SIZE" -ge 25001 ] ; then - ES_HEAP_SIZE="25000m" + ES_HEAP_SIZE="25000m" else - ES_HEAP_SIZE=$ES_HEAP_SIZE"m" + ES_HEAP_SIZE=$ES_HEAP_SIZE"m" fi fi export ES_HEAP_SIZE @@ -1499,19 +1514,12 @@ generate_passwords(){ FLEETSAPASS=$(get_random_value) FLEETJWT=$(get_random_value) 
GRAFANAPASS=$(get_random_value) - if [[ "$THEHIVE" == "1" ]]; then - HIVEKEY=$(get_random_value) - HIVEPLAYSECRET=$(get_random_value) - CORTEXKEY=$(get_random_value) - CORTEXORGUSERKEY=$(get_random_value) - CORTEXPLAYSECRET=$(get_random_value) - fi SENSORONIKEY=$(get_random_value) KRATOSKEY=$(get_random_value) } generate_repo_tarball() { - mkdir /opt/so/repo + mkdir -p /opt/so/repo tar -czf /opt/so/repo/"$SOVERSION".tar.gz -C "$(pwd)/.." . } @@ -1673,7 +1681,6 @@ manager_pillar() { " es_port: $node_es_port"\ " grafana: $GRAFANA"\ " osquery: $OSQUERY"\ - " thehive: $THEHIVE"\ " playbook: $PLAYBOOK"\ ""\ "elasticsearch:"\ @@ -1748,7 +1755,7 @@ manager_global() { " managerip: '$MAINIP'" > "$global_pillar" if [[ $HIGHLANDER == 'True' ]]; then - printf '%s\n'\ + printf '%s\n'\ " highlander: True"\ >> "$global_pillar" fi if [[ $is_airgap ]]; then @@ -1759,22 +1766,6 @@ manager_global() { " airgap: False"\ >> "$global_pillar" fi - # Check if TheHive is enabled. If so, add creds and other details - if [[ "$THEHIVE" == "1" ]]; then - printf '%s\n'\ - " hiveuser: '$WEBUSER'"\ - " hivepassword: '$WEBPASSWD1'"\ - " hivekey: '$HIVEKEY'"\ - " hiveplaysecret: '$HIVEPLAYSECRET'"\ - " cortexuser: '$WEBUSER'"\ - " cortexpassword: '$WEBPASSWD1'"\ - " cortexkey: '$CORTEXKEY'"\ - " cortexorgname: 'SecurityOnion'"\ - " cortexorguser: 'soadmin'"\ - " cortexorguserkey: '$CORTEXORGUSERKEY'"\ - " cortexplaysecret: '$CORTEXPLAYSECRET'" >> "$global_pillar" - fi - # Continue adding other details printf '%s\n'\ " fleet_custom_hostname: "\ @@ -1794,14 +1785,15 @@ manager_global() { " enabled: $STRELKA"\ " rules: 1" >> "$global_pillar" if [[ $is_airgap ]]; then - printf '%s\n'\ - " repos:"\ - " - 'https://$HOSTNAME/repo/rules/strelka'" >> "$global_pillar" - else - printf '%s\n'\ - " repos:"\ - " - 'https://github.com/Neo23x0/signature-base'" >> "$global_pillar" - fi + printf '%s\n'\ + " repos:"\ + " - 'https://$HOSTNAME/repo/rules/strelka'" >> "$global_pillar" + else + printf '%s\n'\ + " repos:"\ 
+ " - 'https://github.com/Neo23x0/signature-base'" >> "$global_pillar" + fi + printf '%s\n'\ "curator:"\ " hot_warm: False"\ @@ -1829,101 +1821,101 @@ manager_global() { " cluster_routing_allocation_disk_watermark_flood_stage: '98%'"\ " index_settings:"\ " so-beats:"\ - " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " index_template:"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-endgame:"\ - " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " index_template:"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-firewall:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-flow:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-ids:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-import:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 73000"\ " delete: 73001"\ " so-osquery:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-ossec:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " 
number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-strelka:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-syslog:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 1"\ " warm: 7"\ " close: 30"\ " delete: 365"\ " so-zeek:"\ " index_template:"\ - " template:"\ - " settings:"\ - " index:"\ - " number_of_shards: 1"\ + " template:"\ + " settings:"\ + " index:"\ + " number_of_shards: 2"\ " warm: 7"\ " close: 45"\ " delete: 365"\ @@ -1969,7 +1961,7 @@ minio_generate_keys() { network_init() { disable_ipv6 set_hostname - if [[ "$setup_type" == 'iso' ]]; then + if [[ ( $is_iso || $is_analyst_iso ) ]]; then set_management_interface fi } @@ -2214,7 +2206,7 @@ reset_proxy() { [[ -f /etc/gitconfig ]] && rm -f /etc/gitconfig - if [[ $OS == 'centos' ]]; then + if [[ $is_centos ]]; then sed -i "/proxy=/d" /etc/yum.conf else [[ -f /etc/apt/apt.conf.d/00-proxy.conf ]] && rm -f /etc/apt/apt.conf.d/00-proxy.conf @@ -2242,7 +2234,7 @@ backup_dir() { remove_package() { local package_name=$1 - if [ $OS = 'centos' ]; then + if [[ $is_centos ]]; then if rpm -qa | grep -q "$package_name"; then logCmd "yum remove -y $package_name" fi @@ -2263,38 +2255,41 @@ remove_package() { saltify() { # Install updates and Salt - if [ $OS = 'centos' ]; then + if [[ $is_centos ]]; then set_progress_str 6 'Installing various dependencies' - if [[ ! $is_iso ]]; then + if [[ ! ( $is_iso || $is_analyst_iso ) ]]; then logCmd "yum -y install wget nmap-ncat" - fi - case "$install_type" in - 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT') - reserve_group_ids - if [[ ! 
$is_iso ]]; then - logCmd "yum -y install sqlite curl mariadb-devel" - fi - # Download Ubuntu Keys in case manager updates = 1 - logCmd "mkdir -vp /opt/so/gpg" - if [[ ! $is_airgap ]]; then - logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/SALTSTACK-GPG-KEY.pub" - logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg" - logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH" - fi - set_progress_str 7 'Installing salt-master' - if [[ ! $is_iso ]]; then - logCmd "yum -y install salt-master-3004.1" - fi - logCmd "systemctl enable salt-master" - ;; - *) - ;; - esac + fi + + if [[ ! $is_analyst ]]; then + case "$install_type" in + 'MANAGER' | 'EVAL' | 'MANAGERSEARCH' | 'FLEET' | 'HELIXSENSOR' | 'STANDALONE'| 'IMPORT') + reserve_group_ids + if [[ ! $is_iso ]]; then + logCmd "yum -y install sqlite curl mariadb-devel" + fi + # Download Ubuntu Keys in case manager updates = 1 + logCmd "mkdir -vp /opt/so/gpg" + if [[ ! $is_airgap ]]; then + logCmd "wget -q --inet4-only -O /opt/so/gpg/SALTSTACK-GPG-KEY.pub https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/SALTSTACK-GPG-KEY.pub" + logCmd "wget -q --inet4-only -O /opt/so/gpg/docker.pub https://download.docker.com/linux/ubuntu/gpg" + logCmd "wget -q --inet4-only -O /opt/so/gpg/GPG-KEY-WAZUH https://packages.wazuh.com/key/GPG-KEY-WAZUH" + fi + set_progress_str 7 'Installing salt-master' + if [[ ! $is_iso ]]; then + logCmd "yum -y install salt-master-3004.1" + fi + logCmd "systemctl enable salt-master" + ;; + *) + ;; + esac + fi if [[ ! $is_airgap ]]; then logCmd "yum clean expire-cache" fi set_progress_str 8 'Installing salt-minion & python modules' - if [[ ! $is_iso ]]; then + if [[ ! 
( $is_iso || $is_analyst_iso ) ]]; then logCmd "yum -y install salt-minion-3004.1 httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq" logCmd "yum -y update --exclude=salt*" fi @@ -2407,7 +2402,7 @@ secrets_pillar(){ securityonion_repo() { # Remove all the current repos - if [[ "$OS" == "centos" ]]; then + if [[ $is_centos ]]; then if [[ "$INTERWEBS" == "AIRGAP" ]]; then echo "This is airgap I don't need to add this repo" else @@ -2426,8 +2421,8 @@ securityonion_repo() { # update this package because the repo config files get added back # if the package is updated when the update_packages function is called logCmd "yum -v -y update centos-release" - echo "Move the .repo files that were added by the centos-release package." - find /etc/yum.repos.d/ -type f -not -name 'securityonion*repo' -print0 | xargs -0 -I {} mv -bvf {} /root/oldrepos/ + echo "Backing up the .repo files that were added by the centos-release package." + logCmd "find /etc/yum.repos.d/ -type f -not -name 'securityonion*repo' -print0 | xargs -0 -I {} mv -bvf {} /root/oldrepos/" logCmd "yum repolist all" fi else @@ -2441,8 +2436,28 @@ set_network_dev_status_list() { } set_main_ip() { - MAINIP=$(ip route get 1 | awk '{print $7;exit}') - MNIC_IP=$(ip a s "$MNIC" | grep -oE 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2) + local count=0 + local progress='.' + local c=0 + local m=3.3 + local max_attempts=30 + echo "Gathering the management IP. " + while ! valid_ip4 "$MAINIP" || ! 
valid_ip4 "$MNIC_IP"; do + MAINIP=$(ip route get 1 | awk '{print $7;exit}') + MNIC_IP=$(ip a s "$MNIC" | grep -oE 'inet [0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}' | cut -d' ' -f2) + ((count=count+1)) + p=$(awk -vp=$m -vq=$count 'BEGIN{printf "%.0f" ,p * q}') + printf "%-*s" $((count+1)) '[' | tr ' ' '#' + printf "%*s%3d%%\r" $((max_attempts-count)) "]" "$p" + if [ $count = $max_attempts ]; then + echo "ERROR: Could not determine MAINIP or MNIC_IP." >> "$setup_log" 2>&1 + echo "MAINIP=$MAINIP" >> "$setup_log" 2>&1 + echo "MNIC_IP=$MNIC_IP" >> "$setup_log" 2>&1 + whiptail_error_message "The management IP could not be determined. Please check the log at /root/sosetup.log and verify the network configuration. Press OK to exit." + exit 1 + fi + sleep 1 + done } # Add /usr/sbin to everyone's path @@ -2498,7 +2513,7 @@ set_proxy() { "}" > /root/.docker/config.json # Set proxy for package manager - if [ "$OS" = 'centos' ]; then + if [[ $is_centos ]]; then echo "proxy=$so_proxy" >> /etc/yum.conf else # Set it up so the updates roll through the manager @@ -2669,8 +2684,8 @@ set_initial_firewall_policy() { 'EVAL' | 'MANAGERSEARCH' | 'STANDALONE' | 'IMPORT') $default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP" - $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP" - $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP" + $default_salt_dir/salt/common/tools/sbin/so-firewall includehost sensor "$MAINIP" + $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost search_node "$MAINIP" case "$install_type" in 'EVAL') $default_salt_dir/pillar/data/addtotab.sh evaltab "$MINION_ID" "$MAINIP" "$num_cpu_cores" "$random_uid" "$MNIC" "$filesystem_root" "$filesystem_nsm" "$INTERFACE" True @@ -2686,7 +2701,7 @@ set_initial_firewall_policy() { 'HELIXSENSOR') 
$default_salt_dir/salt/common/tools/sbin/so-firewall includehost manager "$MAINIP" $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP" - $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP" + $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost sensor "$MAINIP" ;; 'SENSOR' | 'SEARCHNODE' | 'HEAVYNODE' | 'FLEET' | 'IDH' | 'RECEIVER') $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP" @@ -2726,6 +2741,13 @@ set_initial_firewall_policy() { # TODO: implement ;; esac + + # Add some firewall rules for analyst workstations that get added to the grid + if [[ $is_analyst ]]; then + $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall includehost minion "$MAINIP" + $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" sudo $default_salt_dir/salt/common/tools/sbin/so-firewall --apply includehost analyst "$MAINIP" + fi + } # Set up the management interface on the ISO @@ -2777,7 +2799,7 @@ set_redirect() { set_updates() { if [ "$MANAGERUPDATES" = '1' ]; then - if [ "$OS" = 'centos' ]; then + if [[ $is_centos ]]; then if [[ ! $is_airgap ]] && ! 
( grep -q "$MSRV" /etc/yum.conf); then if grep -q "proxy=" /etc/yum.conf; then sed -i "s/proxy=.*/proxy=http:\/\/$MSRV:3142/" /etc/yum.conf @@ -2844,9 +2866,9 @@ update_sudoers() { } update_packages() { - if [ "$OS" = 'centos' ]; then - logCmd "yum repolist" - logCmd "yum -y update --exclude=salt*,wazuh*,docker*,containerd*" + if [[ $is_centos ]]; then + logCmd "yum repolist" + logCmd "yum -y update --exclude=salt*,wazuh*,docker*,containerd*" else retry 50 10 "apt-get -y update" >> "$setup_log" 2>&1 || exit 1 retry 50 10 "apt-get -y upgrade" >> "$setup_log" 2>&1 || exit 1 @@ -2903,10 +2925,11 @@ write_out_idh_services() { printf '%s\n'\ "idh:"\ + " restrict_management_ip: $IDHMGTRESTRICT"\ " services:" >> "$pillar_file" for service in ${idh_services[@]}; do echo " - $service" | tr '[:upper:]' '[:lower:]' >> "$pillar_file" - done + done } # Enable Zeek Logs diff --git a/setup/so-preflight b/setup/so-preflight index cdbcf6bfe..d1fd89b6e 100755 --- a/setup/so-preflight +++ b/setup/so-preflight @@ -96,7 +96,7 @@ check_new_repos() { local repo_arr=( "https://download.docker.com/linux/ubuntu/gpg" "https://download.docker.com/linux/ubuntu" - "https://repo.saltstack.com/py3/ubuntu/$ubuntu_version/amd64/archive/3003/SALTSTACK-GPG-KEY.pub" + "https://repo.securityonion.net/file/securityonion-repo/ubuntu/$ubuntu_version/amd64/salt/SALTSTACK-GPG-KEY.pub" "https://packages.wazuh.com/key/GPG-KEY-WAZUH" "https://packages.wazuh.com" ) diff --git a/setup/so-setup b/setup/so-setup index 9579df79d..e06b5ded9 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -70,10 +70,71 @@ while [[ $# -gt 0 ]]; do esac done -if [[ "$setup_type" == 'iso' ]]; then - is_iso=true +detect_os +is_analyst= +if [ "$setup_type" = 'analyst' ]; then + is_analyst=true + # Check to see if this is an ISO + if [ -d /root/SecurityOnion ]; then + is_analyst_iso=true + fi fi +if [[ "$setup_type" == 'iso' ]]; then + if [[ $is_centos ]]; then + is_iso=true + else + echo "Only use 'so-setup iso' for an ISO install on 
CentOS. Please run 'so-setup network' instead." + exit 1 + fi +fi + +# Check to see if this is an analyst install. If it is let's run things differently + +if [[ $is_analyst ]]; then + + # Make sure it's CentOS + if [[ ! $is_centos ]]; then + echo "Analyst Workstation is only supported on CentOS 7" + exit 1 + fi + + if ! whiptail_analyst_install; then + if [[ $is_analyst_iso ]]; then + if whiptail_analyst_nongrid_iso; then + # Remove setup from auto launching + parse_install_username + sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1 + echo "Enabling graphical interface and setting it to load at boot" + systemctl set-default graphical.target + startx + exit 0 + else + # Abort! + exit 0 + fi + else + if whiptail_analyst_nongrid_network; then + echo "" + echo "" + echo "Kicking off the automated setup of the analyst workstation. This can take a while depending on your network connection." + echo "" + echo "" + analyst_salt_local + else + # Abort! + exit 0 + fi + fi + fi + + # If you got this far then you want to join the grid + is_minion=true + +fi + + + if ! 
[ -f $install_opt_file ] && [ -d /root/manager_setup/securityonion ] && [[ $(pwd) != /root/manager_setup/securityonion/setup ]]; then exec bash /root/manager_setup/securityonion/setup/so-setup "${original_args[@]}" fi @@ -105,7 +166,6 @@ catch() { whiptail_setup_failed exit 1 } - automated=no progress() { local msg=${1:-'Please wait while installing...'} @@ -117,8 +177,6 @@ progress() { fi } -detect_os - if [[ -f automation/$automation && $(basename $automation) == $automation ]]; then echo "Preselecting variable values based on automated setup: $automation" >> $setup_log 2>&1 source automation/$automation @@ -151,11 +209,11 @@ if [[ -f automation/$automation && $(basename $automation) == $automation ]]; th fi case "$setup_type" in - iso | network) # Accepted values + iso | network | analyst) # Accepted values echo "Beginning Security Onion $setup_type install" >> $setup_log 2>&1 ;; *) - echo "Invalid install type, must be 'iso' or 'network'" | tee -a $setup_log + echo "Invalid install type, must be 'iso', 'network' or 'analyst'." | tee -a $setup_log exit 1 ;; esac @@ -197,6 +255,37 @@ if ! [[ -f $install_opt_file ]]; then echo "User cancelled setup." | tee -a "$setup_log" whiptail_cancel fi + if [[ $is_analyst ]]; then + collect_hostname + if [[ $is_analyst_iso ]]; then + # Prompt Network Setup + whiptail_management_nic + whiptail_dhcp_or_static + + if [ "$address_type" != 'DHCP' ]; then + collect_int_ip_mask + collect_gateway + collect_dns + collect_dns_domain + fi + + fi + if [[ ! $is_analyst_iso ]]; then + # This should be a network install + whiptail_network_notice + whiptail_dhcp_warn + whiptail_management_nic + fi + whiptail_network_init_notice + network_init + printf '%s\n' \ + "MNIC=$MNIC" \ + "HOSTNAME=$HOSTNAME" > "$net_init_file" + set_main_ip + compare_main_nic_ip + + fi + if [[ $setup_type == 'iso' ]] && [ "$automated" == no ]; then whiptail_first_menu_iso if [[ $option == "CONFIGURENETWORK" ]]; then @@ -207,14 +296,16 @@ if ! 
[[ -f $install_opt_file ]]; then printf '%s\n' \ "MNIC=$MNIC" \ "HOSTNAME=$HOSTNAME" > "$net_init_file" - set_main_ip >> $setup_log 2>&1 + set_main_ip compare_main_nic_ip whiptail_net_setup_complete else true fi fi - whiptail_install_type + if [[ ! $is_analyst ]]; then + whiptail_install_type + fi else source $install_opt_file fi @@ -263,18 +354,15 @@ elif [ "$install_type" = 'RECEIVER' ]; then is_minion=true is_receiver=true elif [ "$install_type" = 'ANALYST' ]; then - cd .. || exit 255 - exec bash so-analyst-install + if [ "$setup_type" != 'analyst' ]; then + exec bash so-setup analyst + fi fi if [[ $is_manager || $is_import ]]; then check_elastic_license fi -if [[ $is_idh ]]; then - collect_idh_services -fi - if ! [[ -f $install_opt_file ]]; then if [[ $is_manager && $is_sensor ]]; then check_requirements "standalone" @@ -284,7 +372,7 @@ if ! [[ -f $install_opt_file ]]; then check_requirements "dist" "idh" elif [[ $is_sensor && ! $is_eval ]]; then check_requirements "dist" "sensor" - elif [[ $is_distmanager || $is_minion ]] && [[ ! $is_import ]]; then + elif [[ $is_distmanager || $is_minion ]] && [[ ! ( $is_import || $is_analyst ) ]]; then check_requirements "dist" elif [[ $is_import ]]; then check_requirements "import" @@ -309,26 +397,28 @@ if ! 
[[ -f $install_opt_file ]]; then network_init fi - set_main_ip >> $setup_log 2>&1 + set_main_ip compare_main_nic_ip if [[ $is_minion ]]; then collect_mngr_hostname add_mngr_ip_to_hosts - fi - - if [[ $is_minion ]]; then whiptail_ssh_key_copy_notice copy_ssh_key >> $setup_log 2>&1 fi + if [[ $is_idh ]]; then + collect_idh_services + collect_idh_preferences + fi + # Check if this is an airgap install if [[ ( $is_manager || $is_import) && $is_iso ]]; then whiptail_airgap if [[ "$INTERWEBS" == 'AIRGAP' ]]; then is_airgap=true fi - elif [[ $is_minion && $is_iso ]]; then + elif [[ $is_minion && ( $is_iso || $is_analyst ) ]]; then $sshcmd -i /root/.ssh/so.key soremote@"$MSRV" [[ -f /etc/yum.repos.d/airgap_repo.repo ]] >> $setup_log 2>&1 airgap_check=$? [[ $airgap_check == 0 ]] && is_airgap=true >> $setup_log 2>&1 @@ -394,7 +484,12 @@ detect_cloud short_name=$(echo "$HOSTNAME" | awk -F. '{print $1}') -MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]') +if [[ $is_analyst ]]; then + MINION_ID=$(echo "${short_name}_workstation" | tr '[:upper:]' '[:lower:]') +fi +if [[ ! $is_analyst ]]; then + MINION_ID=$(echo "${short_name}_${install_type}" | tr '[:upper:]' '[:lower:]') +fi export MINION_ID echo "MINION_ID = $MINION_ID" >> $setup_log 2>&1 @@ -440,7 +535,6 @@ if [[ $is_import ]]; then GRAFANA=${GRAFANA:-0} OSQUERY=${OSQUERY:-0} WAZUH=${WAZUH:-0} - THEHIVE=${THEHIVE:-0} PLAYBOOK=${PLAYBOOK:-0} fi @@ -558,7 +652,7 @@ if [[ $is_sensor && ! $is_eval ]]; then fi fi -[[ $is_iso ]] && collect_ntp_servers +[[ ( $is_iso || $is_analyst ) ]] && collect_ntp_servers if [[ ($is_node || $is_receiver) && ! $is_eval ]]; then whiptail_node_advanced @@ -616,7 +710,9 @@ echo "1" > /root/accept_changes [[ ${#ntp_servers[@]} -gt 0 ]] && configure_ntp >> $setup_log 2>&1 - reserve_ports + if [[ ! $is_analyst ]]; then + reserve_ports + fi set_path @@ -646,8 +742,12 @@ echo "1" > /root/accept_changes if [[ $is_manager && ! 
$is_eval ]]; then add_soremote_user_manager >> $setup_log 2>&1 fi - - host_pillar >> $setup_log 2>&1 + if [[ ! $is_analyst ]]; then + host_pillar >> $setup_log 2>&1 + fi + if [[ $is_analyst ]]; then + analyst_workstation_pillar + fi ntp_pillar >> $setup_log 2>&1 @@ -670,12 +770,12 @@ echo "1" > /root/accept_changes # Import the gpg keys gpg_rpm_import >> $setup_log 2>&1 info "Disabling fastestmirror" - [[ $OS == 'centos' ]] && disable_fastestmirror + [[ $is_centos ]] && disable_fastestmirror if [[ ! $is_airgap ]]; then - securityonion_repo >> $setup_log 2>&1 - update_packages >> $setup_log 2>&1 + securityonion_repo >> $setup_log 2>&1 + update_packages >> $setup_log 2>&1 else - airgap_repo >> $setup_log 2>&1 + airgap_repo >> $setup_log 2>&1 fi if [[ $is_sensor || $is_helix || $is_import ]]; then @@ -694,17 +794,22 @@ echo "1" > /root/accept_changes set_progress_str 5 'Installing Salt and dependencies' saltify 2>> $setup_log + + if [[ ! $is_analyst ]]; then + set_progress_str 6 'Installing Docker and dependencies' + docker_install >> $setup_log 2>&1 + fi - set_progress_str 6 'Installing Docker and dependencies' - docker_install >> $setup_log 2>&1 - set_progress_str 7 'Generating patch pillar' patch_pillar >> $setup_log 2>&1 set_progress_str 8 'Initializing Salt minion' configure_minion "$minion_type" >> $setup_log 2>&1 - check_sos_appliance >> $setup_log 2>&1 + if [[ ! $is_analyst ]]; then + check_sos_appliance >> $setup_log 2>&1 + fi + update_sudoers_for_testing >> $setup_log 2>&1 if [[ $is_manager || $is_helix || $is_import ]]; then @@ -782,8 +887,10 @@ echo "1" > /root/accept_changes generate_ca >> $setup_log 2>&1 fi - set_progress_str 24 'Generating SSL' - generate_ssl >> $setup_log 2>&1 + if [[ ! 
$is_analyst ]]; then + set_progress_str 24 'Generating SSL' + generate_ssl >> $setup_log 2>&1 + fi if [[ $is_manager || $is_helix || $is_import ]]; then set_progress_str 25 'Configuring firewall' @@ -810,18 +917,22 @@ echo "1" > /root/accept_changes echo "Finished so-elastic-auth..." >> $setup_log 2>&1 fi - set_progress_str 61 "$(print_salt_state_apply 'firewall')" - salt-call state.apply -l info firewall >> $setup_log 2>&1 + if [[ ! $is_analyst ]]; then + set_progress_str 61 "$(print_salt_state_apply 'firewall')" + salt-call state.apply -l info firewall >> $setup_log 2>&1 + fi - if [ $OS = 'centos' ]; then + if [[ $is_centos ]]; then set_progress_str 61 'Installing Yum utilities' salt-call state.apply -l info yum.packages >> $setup_log 2>&1 fi - set_progress_str 62 "$(print_salt_state_apply 'common')" - salt-call state.apply -l info common >> $setup_log 2>&1 + if [[ ! $is_analyst ]]; then + set_progress_str 62 "$(print_salt_state_apply 'common')" + salt-call state.apply -l info common >> $setup_log 2>&1 + fi - if [[ ! $is_helix && ! $is_receiver && ! $is_idh ]]; then + if [[ ! $is_helix && ! $is_receiver && ! $is_idh && ! 
$is_analyst ]]; then set_progress_str 62 "$(print_salt_state_apply 'nginx')" salt-call state.apply -l info nginx >> $setup_log 2>&1 fi @@ -919,6 +1030,8 @@ echo "1" > /root/accept_changes set_progress_str 77 "$(print_salt_state_apply 'fleet.event_update-custom-hostname')" pillar_override="{\"global\":{\"fleet_custom_hostname\": \"$FLEETCUSTOMHOSTNAME\"}}" salt-call state.apply -l info fleet.event_update-custom-hostname pillar="$pillar_override" >> $setup_log 2>&1 + rm -f /etc/pki/managerssl.crt + salt-call state.apply -l info ssl >> $setup_log 2>&1 fi set_progress_str 78 "$(print_salt_state_apply 'so-fleet-setup')" @@ -937,11 +1050,6 @@ echo "1" > /root/accept_changes salt-call state.apply -l info wazuh >> $setup_log 2>&1 fi - if [[ "$THEHIVE" = 1 ]]; then - set_progress_str 80 "$(print_salt_state_apply 'thehive')" - salt-call state.apply -l info thehive >> $setup_log 2>&1 - fi - if [[ "$STRELKA" = 1 ]]; then if [[ $is_sensor ]]; then set_progress_str 81 "$(print_salt_state_apply 'strelka')" @@ -967,15 +1075,22 @@ echo "1" > /root/accept_changes salt-call state.apply -l info filebeat >> $setup_log 2>&1 fi - set_progress_str 85 'Applying finishing touches' - filter_unused_nics >> $setup_log 2>&1 - network_setup >> $setup_log 2>&1 - so-ssh-harden >> $setup_log 2>&1 - + if [[ ! 
$is_analyst ]]; then + set_progress_str 85 'Applying finishing touches' + filter_unused_nics >> $setup_log 2>&1 + network_setup >> $setup_log 2>&1 + so-ssh-harden >> $setup_log 2>&1 + fi + if [[ $is_manager || $is_import ]]; then set_progress_str 87 'Adding user to SOC' add_web_user >> $setup_log 2>&1 fi + + if [[ $is_analyst ]]; then + # Remove access to the manager from the analyst workstation + rm -rf /root/.ssh/so.key* + fi set_progress_str 90 'Enabling checkin at boot' checkin_at_boot >> $setup_log 2>&1 @@ -1018,20 +1133,15 @@ else generate_repo_tarball >> "$setup_log" 2>&1 fi - if [[ $THEHIVE == 1 ]]; then - set_progress_str 99 'Waiting for TheHive to start up' - check_hive_init >> $setup_log 2>&1 - fi - if [[ -n $LEARN_LOGSCAN_ENABLE ]]; then set_progress_str 99 'Enabling logscan' so-learn enable logscan --apply >> $setup_log 2>&1 fi if [[ -n $ENDGAMEHOST ]]; then - set_progress_str 99 'Configuring firewall for Endgame SMP' - so-firewall --apply includehost endgame $ENDGAMEHOST >> $setup_log 2>&1 - fi + set_progress_str 99 'Configuring firewall for Endgame SMP' + so-firewall --apply includehost endgame $ENDGAMEHOST >> $setup_log 2>&1 + fi } | whiptail_gauge_post_setup "Running post-installation steps..." diff --git a/setup/so-whiptail b/setup/so-whiptail index c39ec4ff7..2c60b7e3e 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -33,6 +33,58 @@ whiptail_airgap() { INTERWEBS=$(echo "${INTERWEBS^^}" | tr -d ' ') } +whiptail_analyst_install() { + + [ -n "$TESTING" ] && return + + read -r -d '' message <<- EOM + + Welcome to the Security Onion Analyst Workstation install! + + Would you like to join this workstation to an existing grid? + + EOM + whiptail --title "$whiptail_title" \ + --yesno "$message" 11 75 --defaultno + +} + +whiptail_analyst_nongrid_iso() { + + [ -n "$TESTING" ] && return + + read -r -d '' message <<- EOM + + You have selected this Analyst workstation to be independent. 
+ + Would you still like to have the graphical interface loaded at boot? + + NOTE: Selecting no will exit without making changes. + + EOM + whiptail --title "$whiptail_title" \ + --yesno "$message" 11 75 --defaultno + +} + +whiptail_analyst_nongrid_network() { + + [ -n "$TESTING" ] && return + + read -r -d '' message <<- EOM + + You have selected this Analyst workstation to be independent. + + Would you still like to install and load the graphical interface? + + NOTE: Selecting no will exit without making changes. + + EOM + whiptail --title "$whiptail_title" \ + --yesno "$message" 11 75 --defaultno + +} + whiptail_avoid_default_hostname() { [ -n "$TESTING" ] && return @@ -434,9 +486,9 @@ whiptail_end_settings() { if [[ $is_idh ]]; then __append_end_msg "IDH Services Enabled:" - for service in ${idh_services[@]}; do + for service in ${idh_services[@]}; do __append_end_msg "- $service" - done + done fi @@ -578,14 +630,16 @@ whiptail_end_settings() { __append_end_msg " Logstash Batch Size: $LSPIPELINEBATCH" __append_end_msg " Logstash Input Threads: $LSINPUTTHREADS" else - if [[ ! $is_receiver ]]; then - __append_end_msg "Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE" - __append_end_msg "Elasticsearch Storage Space: ${log_size_limit}GB" + if [[ ! $is_analyst ]]; then + if [[ ! 
$is_receiver ]]; then + __append_end_msg "Elasticsearch Heap Size: $NODE_ES_HEAP_SIZE" + __append_end_msg "Elasticsearch Storage Space: ${log_size_limit}GB" + fi + __append_end_msg "Logstash Heap Size: $NODE_LS_HEAP_SIZE" + __append_end_msg "Logstash Worker Count: $LSPIPELINEWORKERS" + __append_end_msg "Logstash Batch Size: $LSPIPELINEBATCH" + __append_end_msg "Logstash Input Threads: $LSINPUTTHREADS" fi - __append_end_msg "Logstash Heap Size: $NODE_LS_HEAP_SIZE" - __append_end_msg "Logstash Worker Count: $LSPIPELINEWORKERS" - __append_end_msg "Logstash Batch Size: $LSPIPELINEBATCH" - __append_end_msg "Logstash Input Threads: $LSINPUTTHREADS" fi @@ -725,6 +779,17 @@ whiptail_homenet_sensor() { export HNSENSOR } + whiptail_idh_preferences() { + + [ -n "$TESTING" ] && return + + idh_preferences=$(whiptail --title "$whiptail_title" --radiolist \ + "\nBy default, the IDH services selected in the previous screen will be bound to all interfaces and IP addresses on this system.\n\nIf you would like to prevent IDH services from being published on this system's management IP, you can select the option below." 20 75 5 \ + "$MAINIP" "Disable IDH services on this management IP " OFF 3>&1 1>&2 2>&3 ) + + local exitstatus=$? 
+ whiptail_check_exitstatus $exitstatus +} whiptail_idh_services() { @@ -867,23 +932,13 @@ whiptail_install_type_other() { [ -n "$TESTING" ] && return - # so-analyst-install will only work with a working network connection - # so only show it on network installs for now - if [[ $setup_type == 'network' ]]; then - install_type=$(whiptail --title "$whiptail_title" --radiolist \ - "Choose distributed node type:" 9 65 2 \ - "ANALYST" "Quit setup and run so-analyst-install " ON \ - "HELIXSENSOR" "Create a Helix sensor " OFF \ - 3>&1 1>&2 2>&3 + install_type=$(whiptail --title "$whiptail_title" --radiolist \ + "Choose node type:" 9 65 2 \ + "ANALYST" "Setup will run 'so-setup analyst' " ON \ + "HELIXSENSOR" "Create a Helix sensor " OFF \ + 3>&1 1>&2 2>&3 ) - else - install_type=$(whiptail --title "$whiptail_title" --radiolist \ - "Choose distributed node type:" 8 65 1 \ - "HELIXSENSOR" "Create a Helix sensor " ON \ - 3>&1 1>&2 2>&3 - ) - fi - + local exitstatus=$? whiptail_check_exitstatus $exitstatus @@ -1473,6 +1528,14 @@ whiptail_oinkcode() { #TODO: helper function to display error message or exit if batch mode # exit_if_batch <"Error string"> +whiptail_error_message() { + + local error_message=$1 # message to be displayed + + whiptail --title "$whiptail_title" --msgbox "$error_message" 10 75 + +} + whiptail_passwords_dont_match() { whiptail --title "$whiptail_title" --msgbox "Passwords don't match. Please re-enter." 8 75 @@ -1545,40 +1608,37 @@ whiptail_patch_schedule_select_hours() { [ -n "$TESTING" ] && return - # Select the hours to patch - PATCHSCHEDULEHOURS=$(whiptail --title "$whiptail_title" --checklist \ - "At which time, UTC, do you want to apply OS patches on the selected days?" 
22 75 13 \ - 00:00 "" OFF \ - 01:00 "" OFF \ - 02:00 "" ON \ - 03:00 "" OFF \ - 04:00 "" OFF \ - 05:00 "" OFF \ - 06:00 "" OFF \ - 07:00 "" OFF \ - 08:00 "" OFF \ - 09:00 "" OFF \ - 10:00 "" OFF \ - 11:00 "" OFF \ - 12:00 "" OFF \ - 13:00 "" OFF \ - 14:00 "" OFF \ - 15:00 "" OFF \ - 16:00 "" OFF \ - 17:00 "" OFF \ - 18:00 "" OFF \ - 19:00 "" OFF \ - 20:00 "" OFF \ - 21:00 "" OFF \ - 22:00 "" OFF \ - 23:00 "" OFF 3>&1 1>&2 2>&3) - - local exitstatus=$? - whiptail_check_exitstatus $exitstatus - - PATCHSCHEDULEHOURS=$(echo "$PATCHSCHEDULEHOURS" | tr -d '"') - - IFS=' ' read -ra PATCHSCHEDULEHOURS <<< "$PATCHSCHEDULEHOURS" + # Select the hours to patch + PATCHSCHEDULEHOURS=$(whiptail --title "$whiptail_title" --checklist \ + "At which time, UTC, do you want to apply OS patches on the selected days?" 22 75 13 \ + 00:00 "" OFF \ + 01:00 "" OFF \ + 02:00 "" ON \ + 03:00 "" OFF \ + 04:00 "" OFF \ + 05:00 "" OFF \ + 06:00 "" OFF \ + 07:00 "" OFF \ + 08:00 "" OFF \ + 09:00 "" OFF \ + 10:00 "" OFF \ + 11:00 "" OFF \ + 12:00 "" OFF \ + 13:00 "" OFF \ + 14:00 "" OFF \ + 15:00 "" OFF \ + 16:00 "" OFF \ + 17:00 "" OFF \ + 18:00 "" OFF \ + 19:00 "" OFF \ + 20:00 "" OFF \ + 21:00 "" OFF \ + 22:00 "" OFF \ + 23:00 "" OFF 3>&1 1>&2 2>&3) + local exitstatus=$? + whiptail_check_exitstatus $exitstatus + PATCHSCHEDULEHOURS=$(echo "$PATCHSCHEDULEHOURS" | tr -d '"') + IFS=' ' read -ra PATCHSCHEDULEHOURS <<< "$PATCHSCHEDULEHOURS" } @@ -1923,10 +1983,10 @@ whiptail_suricata_pins() { done if [[ $is_node && $is_sensor && ! 
$is_eval ]]; then - local PROCS=$(expr $lb_procs / 2) - if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi + local PROCS=$(expr $lb_procs / 2) + if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi else - local PROCS=$lb_procs + local PROCS=$lb_procs fi SURIPINS=$(whiptail --noitem --title "$whiptail_title" --checklist "Please select $PROCS cores to pin Suricata to:" 20 75 12 "${filtered_core_str[@]}" 3>&1 1>&2 2>&3 ) @@ -1978,7 +2038,7 @@ whiptail_you_sure() { read -r -d '' you_sure_text <<- EOM Welcome to Security Onion Setup! - You can use Setup for lots of different use cases from a small standalone installation to a large distributed deployment for your enterprise. Don't forget to review the documentation at: + You can use Setup for several different use cases, from a small standalone installation to a large distributed deployment for your enterprise. Don't forget to review the documentation at: https://docs.securityonion.net Setup uses keyboard navigation and you can use arrow keys to move around. Certain screens may provide a list and ask you to select one or more items from that list. You can use [SPACE] to select items and [ENTER] to proceed to the next screen. 
@@ -2006,10 +2066,10 @@ whiptail_zeek_pins() { done if [[ $is_smooshed ]]; then - local PROCS=$(expr $lb_procs / 2) - if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi + local PROCS=$(expr $lb_procs / 2) + if [ "$PROCS" -lt 1 ]; then PROCS=1; else PROCS=$PROCS; fi else - local PROCS=$lb_procs + local PROCS=$lb_procs fi ZEEKPINS=$(whiptail --noitem --title "$whiptail_title" --checklist "Please select $PROCS cores to pin Zeek to:" 20 75 12 "${cpu_core_list_whiptail[@]}" 3>&1 1>&2 2>&3 ) diff --git a/sigs/securityonion-2.3.120-20220425.iso.sig b/sigs/securityonion-2.3.120-20220425.iso.sig new file mode 100644 index 000000000..ba8743ad3 Binary files /dev/null and b/sigs/securityonion-2.3.120-20220425.iso.sig differ diff --git a/so-analyst-install b/so-analyst-install index 50417c23d..ac92afd77 100755 --- a/so-analyst-install +++ b/so-analyst-install @@ -15,6 +15,6 @@ # You should have received a copy of the GNU General Public License # along with this program. If not, see . -cd "$(dirname "$0")/salt/common/tools/sbin" || exit 255 +cd "$(dirname "$0")/setup" || exit 255 -./so-analyst-install +./so-setup analyst "$@"