From b129b4ceaaab3e93b0cef21379cd2bef597c67b1 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 14 Jul 2023 17:03:20 -0400 Subject: [PATCH 001/417] prepare for alt login --- salt/kratos/enabled.sls | 3 +-- salt/nginx/etc/nginx.conf | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/kratos/enabled.sls b/salt/kratos/enabled.sls index 52d53a4db..31097ccf4 100644 --- a/salt/kratos/enabled.sls +++ b/salt/kratos/enabled.sls @@ -21,8 +21,7 @@ so-kratos: - sobridge: - ipv4_address: {{ DOCKER.containers['so-kratos'].ip }} - binds: - - /opt/so/conf/kratos/schema.json:/kratos-conf/schema.json:ro - - /opt/so/conf/kratos/kratos.yaml:/kratos-conf/kratos.yaml:ro + - /opt/so/conf/kratos/:/kratos-conf:ro - /opt/so/log/kratos/:/kratos-log:rw - /nsm/kratos/db:/kratos-data:rw {% if DOCKER.containers['so-kratos'].custom_bind_mounts %} diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 52e3d6d3d..925583ff3 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -146,7 +146,7 @@ http { proxy_set_header X-Forwarded-Proto $scheme; } - location ~ ^/auth/.*?(login) { + location ~ ^/auth/.*?(login|oidc/callback/) { rewrite /auth/(.*) /$1 break; limit_req zone=auth_throttle burst={{ NGINXMERGED.config.throttle_login_burst }} nodelay; limit_req_status 429; From b24afac0f4776e72a23320da05c7013508d48795 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 18 Jul 2023 10:48:42 -0400 Subject: [PATCH 002/417] upgrade registry version --- salt/registry/enabled.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/registry/enabled.sls b/salt/registry/enabled.sls index 4d9867676..9ea3ad1df 100644 --- a/salt/registry/enabled.sls +++ b/salt/registry/enabled.sls @@ -14,7 +14,7 @@ include: # Install the registry container so-dockerregistry: docker_container.running: - - image: ghcr.io/security-onion-solutions/registry:latest + - image: ghcr.io/security-onion-solutions/registry:2.8.2 - hostname: so-registry - networks: 
- sobridge: From 101e2e8ba19cdfd875419bed0661584758a40ae9 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 24 Jul 2023 17:05:52 -0400 Subject: [PATCH 003/417] do not redirect to API URLs when not logged in --- salt/nginx/etc/nginx.conf | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 925583ff3..bdcbdeacc 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -296,7 +296,9 @@ http { error_page 429 = @error429; location @error401 { - add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; + if ($request_uri ~* ^/(?!(^/api/.*))) { + add_header Set-Cookie "AUTH_REDIRECT=$request_uri;Path=/;Max-Age=14400"; + } return 302 /auth/self-service/login/browser; } From 6d56deb2e46730df2fdeda35af096c6548cf1036 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 25 Jul 2023 08:12:45 -0400 Subject: [PATCH 004/417] oidc 1 --- salt/kratos/config.sls | 8 +++ salt/kratos/defaults.yaml | 13 +++++ salt/kratos/files/oidc.jsonnet | 8 +++ salt/kratos/map.jinja | 4 ++ salt/kratos/soc_kratos.yaml | 86 +++++++++++++++++++++++++++++++++ salt/manager/tools/sbin/so-user | 13 +++-- 6 files changed, 128 insertions(+), 4 deletions(-) create mode 100644 salt/kratos/files/oidc.jsonnet diff --git a/salt/kratos/config.sls b/salt/kratos/config.sls index 55949ea3c..0be43b460 100644 --- a/salt/kratos/config.sls +++ b/salt/kratos/config.sls @@ -51,6 +51,14 @@ kratosschema: - group: 928 - mode: 600 +kratosoidc: + file.managed: + - name: /opt/so/conf/kratos/oidc.jsonnet + - source: salt://kratos/files/oidc.jsonnet + - user: 928 + - group: 928 + - mode: 600 + kratosconfig: file.managed: - name: /opt/so/conf/kratos/kratos.yaml diff --git a/salt/kratos/defaults.yaml b/salt/kratos/defaults.yaml index 3f5370dde..202670e3d 100644 --- a/salt/kratos/defaults.yaml +++ b/salt/kratos/defaults.yaml @@ -1,5 +1,18 @@ kratos: enabled: False + oidc: + enabled: false + config: + id: SSO + mapper_url: 
file:///kratos-conf/oidc.jsonnet + subject_source: userinfo + scopes: + - email + - profile + requested_claims: + id_token: + email: + essential: true config: session: lifespan: 24h diff --git a/salt/kratos/files/oidc.jsonnet b/salt/kratos/files/oidc.jsonnet new file mode 100644 index 000000000..c155b275d --- /dev/null +++ b/salt/kratos/files/oidc.jsonnet @@ -0,0 +1,8 @@ +local claims = std.extVar('claims'); +{ + identity: { + traits: { + email: if 'email' in claims then claims.email else claims.preferred_username + }, + }, +} \ No newline at end of file diff --git a/salt/kratos/map.jinja b/salt/kratos/map.jinja index 6a2b1e0c9..6d1e2917c 100644 --- a/salt/kratos/map.jinja +++ b/salt/kratos/map.jinja @@ -20,3 +20,7 @@ {% do KRATOSDEFAULTS.kratos.config.courier.smtp.update({'connection_uri': KRATOSDEFAULTS.kratos.config.courier.smtp.connection_uri | replace("URL_BASE", GLOBALS.url_base)}) %} {% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %} + +{% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('licensed_features') %} +{% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %} +{% endif %} \ No newline at end of file diff --git a/salt/kratos/soc_kratos.yaml b/salt/kratos/soc_kratos.yaml index b580e9611..3d63f825e 100644 --- a/salt/kratos/soc_kratos.yaml +++ b/salt/kratos/soc_kratos.yaml @@ -3,6 +3,91 @@ kratos: description: You can enable or disable Kratos. advanced: True helpLink: kratos.html + + oidc: + enabled: + description: Set to True to enable OIDC / Single Sign-On (SSO) into SOC. Requires a valid Security Onion license key. + global: True + helpLink: oidc.html + config: + id: + description: Customize the OIDC provider name. This name appears on the login page. Required. + global: True + forcedType: string + helpLink: oidc.html + provider: + description: "Specify the provider type. Required. 
Valid values are: auth0, generic, github, google, microsoft" + global: True + forcedType: string + regex: "auth0|generic|github|google|microsoft" + regexFailureMessage: "Valid values are: auth0, generic, github, google, microsoft" + helpLink: oidc.html + client_id: + description: Specify the client ID, also referenced as the application ID. Required. + global: True + forcedType: string + helpLink: oidc.html + client_secret: + description: Specify the client secret. Required. + global: True + forcedType: string + helpLink: oidc.html + microsoft_tenant: + description: Specify the Microsoft Active Directory Tenant ID. Required when provider is 'microsoft'. + global: True + forcedType: string + helpLink: oidc.html + subject_source: + description: The source of the subject identifier. Typically 'userinfo'. Only used when provider is 'microsoft'. + global: True + forcedType: string + regex: me|userinfo + regexFailureMessage: "Valid values are: me, userinfo" + helpLink: oidc.html + auth_url: + description: Provider's auth URL. Required when provider is 'generic'. + global: True + forcedType: string + helpLink: oidc.html + issuer_url: + description: Provider's issuer URL. Required when provider is 'generic'. + global: True + forcedType: string + helpLink: oidc.html + mapper_url: + description: A file path or URL in Jsonnet format, used to map OIDC claims to the Kratos schema. Defaults to an included file that maps the email claim. Note that the contents of the included file can be customized via the "OIDC Claims Mapping" setting. + advanced: True + global: True + forcedType: string + helpLink: oidc.html + token_url: + description: Provider's token URL. Required when provider is 'generic'. + global: True + forcedType: string + helpLink: oidc.html + scope: + description: List of scoped data categories to request in the authentication response. Typically 'email' and 'profile' are the minimum required scopes. Some providers use an alternate scope name, such as 'user:email'. 
+ advanced: True + global: True + forcedType: "[]string" + helpLink: oidc.html + requested_claims: + id_token: + email: + essential: + description: Specifies whether the email claim is necessary. Typically leave this value set to true. + advanced: True + global: True + helpLink: oidc.html + files: + oidc__jsonnet: + title: OIDC Claims Mapping + description: Customize the OIDC claim mappings to the Kratos schema. The default mappings include the minimum required for login functionality, so this typically does not need to be customized. Visit https://jsonnet.org for more information about this file format. + advanced: True + file: True + global: True + helpLink: oidc.html + config: session: lifespan: @@ -65,6 +150,7 @@ kratos: global: True advanced: True helpLink: kratos.html + flows: settings: privileged_session_max_age: diff --git a/salt/manager/tools/sbin/so-user b/salt/manager/tools/sbin/so-user index 50836e94c..98850143e 100755 --- a/salt/manager/tools/sbin/so-user +++ b/salt/manager/tools/sbin/so-user @@ -341,14 +341,19 @@ function syncElastic() { " and ic.identity_id=i.id " \ " and ict.id=ic.identity_credential_type_id " \ " and ict.name='password' " \ - " and instr(ic.config, 'hashed_password') " \ " and i.state == 'active' " \ "order by ici.identifier;" | \ sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath") [[ $? 
!= 0 ]] && fail "Unable to read credential hashes from database" - echo "${userData}" | \ - jq -r '.user + ":" + .data.hashed_password' \ - >> "$usersTmpFile" + + user_data_formatted=$(echo "${userData}" | jq -r '.user + ":" + .data.hashed_password') + if lookup_salt_value "licensed_features" "" "pillar" | grep -x oidc; then + # generate random placeholder salt/hash for users without passwords + random_crypt=$(get_random_value 53) + user_data_formatted=$(echo "${user_data_formatted}" | sed -r "s/^(.+:)\$/\\1\$2a\$12${random_crypt}/") + fi + + echo "${user_data_formatted}" >> "$usersTmpFile" # Append the user roles while IFS="" read -r rolePair || [ -n "$rolePair" ]; do From b712d505f2fc63278376bf212fc674c38c6613c1 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 26 Jul 2023 09:21:23 -0400 Subject: [PATCH 005/417] update version to use kilo images --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 79a614418..7f2e97617 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.4 +2.4.0-kilo From aa36e9a785b01c67052c7008775235f94a4fad41 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 27 Jul 2023 08:40:27 -0400 Subject: [PATCH 006/417] oidc --- salt/kratos/defaults.yaml | 2 +- salt/kratos/soc_kratos.yaml | 8 ++++---- salt/manager/tools/sbin/so-user | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/salt/kratos/defaults.yaml b/salt/kratos/defaults.yaml index 202670e3d..1e2eef5ed 100644 --- a/salt/kratos/defaults.yaml +++ b/salt/kratos/defaults.yaml @@ -6,7 +6,7 @@ kratos: id: SSO mapper_url: file:///kratos-conf/oidc.jsonnet subject_source: userinfo - scopes: + scope: - email - profile requested_claims: diff --git a/salt/kratos/soc_kratos.yaml b/salt/kratos/soc_kratos.yaml index 3d63f825e..6269fda60 100644 --- a/salt/kratos/soc_kratos.yaml +++ b/salt/kratos/soc_kratos.yaml @@ -6,7 +6,7 @@ kratos: oidc: enabled: - description: Set to True to enable OIDC / Single Sign-On (SSO) into SOC. 
Requires a valid Security Onion license key. + description: Set to True to enable OIDC / Single Sign-On (SSO) to SOC. Requires a valid Security Onion license key. global: True helpLink: oidc.html config: @@ -104,7 +104,7 @@ kratos: methods: password: enabled: - description: Set to True to enable traditional password authentication. Leave as default to ensure proper security protections remain in place. + description: Set to True to enable traditional password authentication to SOC. Typically set to true, except when exclusively using OIDC authentication. global: True advanced: True helpLink: kratos.html @@ -115,7 +115,7 @@ kratos: helpLink: kratos.html totp: enabled: - description: Set to True to enable Time-based One-Time Password (TOTP) multi-factor authentication (MFA). Enable to ensure proper security protections remain in place. Be aware that disabling this setting, after users have already setup TOTP, may prevent users from logging in. + description: Set to True to enable Time-based One-Time Password (TOTP) multi-factor authentication (MFA) to SOC. Enable to ensure proper security protections remain in place. Be aware that disabling this setting, after users have already setup TOTP, may prevent users from logging in. global: True helpLink: kratos.html config: @@ -126,7 +126,7 @@ kratos: helpLink: kratos.html webauthn: enabled: - description: Set to True to enable Security Keys (WebAuthn / PassKeys) for passwordless or multi-factor authentication (MFA) logins. Security Keys are a Public-Key Infrastructure (PKI) based authentication method, typically involving biometric hardware devices, such as laptop fingerprint scanners and USB hardware keys. Be aware that disabling this setting, after users have already setup their accounts with Security Keys, may prevent users from logging in. + description: Set to True to enable Security Keys (WebAuthn / PassKeys) for passwordless or multi-factor authentication (MFA) SOC logins. 
Security Keys are a Public-Key Infrastructure (PKI) based authentication method, typically involving biometric hardware devices, such as laptop fingerprint scanners and USB hardware keys. Be aware that disabling this setting, after users have already setup their accounts with Security Keys, may prevent users from logging in. global: True helpLink: kratos.html config: diff --git a/salt/manager/tools/sbin/so-user b/salt/manager/tools/sbin/so-user index 98850143e..d597cdacb 100755 --- a/salt/manager/tools/sbin/so-user +++ b/salt/manager/tools/sbin/so-user @@ -235,8 +235,8 @@ function updatePassword() { # Update DB with new hash echo "update identity_credentials set config=CAST('{\"hashed_password\":\"$passwordHash\"}' as BLOB), created_at=datetime('now'), updated_at=datetime('now') where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name='password');" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" # Deactivate MFA - echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" - echo "delete from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" + echo "delete from identity_credential_identifiers where identity_credential_id=(select id from identity_credentials where identity_id='${identityId}' and identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc')));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" + echo "delete from identity_credentials where identity_id='${identityId}' and 
identity_credential_type_id=(select id from identity_credential_types where name in ('totp', 'webauthn', 'oidc'));" | sqlite3 -cmd ".timeout ${databaseTimeout}" "$databasePath" [[ $? != 0 ]] && fail "Unable to update password" fi } From a5c47835641a728591704d1fc4c8cbe430bd957e Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 27 Jul 2023 18:36:50 -0400 Subject: [PATCH 007/417] oidc --- salt/kratos/soc_kratos.yaml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/salt/kratos/soc_kratos.yaml b/salt/kratos/soc_kratos.yaml index 6269fda60..0ac2fcd44 100644 --- a/salt/kratos/soc_kratos.yaml +++ b/salt/kratos/soc_kratos.yaml @@ -50,7 +50,7 @@ kratos: forcedType: string helpLink: oidc.html issuer_url: - description: Provider's issuer URL. Required when provider is 'generic'. + description: Provider's issuer URL. Required when provider is 'auth0' or 'generic'. global: True forcedType: string helpLink: oidc.html @@ -66,8 +66,7 @@ kratos: forcedType: string helpLink: oidc.html scope: - description: List of scoped data categories to request in the authentication response. Typically 'email' and 'profile' are the minimum required scopes. Some providers use an alternate scope name, such as 'user:email'. - advanced: True + description: List of scoped data categories to request in the authentication response. Typically 'email' and 'profile' are the minimum required scopes. However, GitHub requires `user:email', instead and Auth0 requires 'profile', 'email', and 'openid'. 
global: True forcedType: "[]string" helpLink: oidc.html From a885baf9603061784bb7641749e3c7039376cc9b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 23 Aug 2023 15:24:32 -0400 Subject: [PATCH 008/417] add desktop to grid --- salt/manager/tools/sbin/so-minion | 11 ++++------- setup/so-setup | 15 +++++++++++++-- 2 files changed, 17 insertions(+), 9 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index edc0b1404..de55c3a5b 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -187,15 +187,9 @@ function add_logstash_to_minion() { # Security Onion Desktop function add_desktop_to_minion() { printf '%s\n'\ - "host:"\ - " mainint: '$MNIC'"\ "desktop:"\ " gui:"\ - " enabled: true"\ - "sensoroni:"\ - " enabled: True"\ - " config:"\ - " node_description: '${NODE_DESCRIPTION//\'/''}'" >> $PILLARFILE + " enabled: true"\ >> $PILLARFILE } # Add basic host info to the minion file @@ -556,6 +550,9 @@ function createRECEIVER() { add_telegraf_to_minion } +function createDESKTOP() { + add_desktop_to_minion +} function testConnection() { retry 15 3 "salt '$MINION_ID' test.ping" True diff --git a/setup/so-setup b/setup/so-setup index c3172280f..8e8b7af43 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -333,7 +333,7 @@ if [[ $is_desktop ]]; then exit 1 fi -# if ! whiptail_desktop_install; then + if ! whiptail_desktop_install; then if [[ $is_desktop_iso ]]; then if whiptail_desktop_nongrid_iso; then # Remove setup from auto launching @@ -365,7 +365,7 @@ if [[ $is_desktop ]]; then exit 0 fi fi -# fi + fi # If you got this far then you want to join the grid is_minion=true @@ -574,6 +574,17 @@ if ! 
[[ -f $install_opt_file ]]; then check_manager_connection set_minion_info whiptail_end_settings + + elif [[ $is_desktop ]]; then + info "Setting up as node type desktop" + #check_requirements "desktop" + networking_needful + collect_mngr_hostname + add_mngr_ip_to_hosts + check_manager_connection + set_minion_info + whiptail_end_settings + fi if [[ $waitforstate ]]; then From 4a489afb893077be5575076359fe9a2be42b7df5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 25 Aug 2023 08:55:00 -0400 Subject: [PATCH 009/417] remove old and install new watchdog package --- salt/common/packages.sls | 2 -- salt/strelka/filestream/config.sls | 8 ++++++++ 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index 5f4a348e7..fe36a1fa1 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -21,7 +21,6 @@ commonpkgs: - python3-dateutil - python3-docker - python3-packaging - - python3-watchdog - python3-lxml - git - rsync @@ -78,7 +77,6 @@ commonpkgs: - python3-packaging - python3-pyyaml - python3-rich - - python3-watchdog - rsync - sqlite - tcpdump diff --git a/salt/strelka/filestream/config.sls b/salt/strelka/filestream/config.sls index 993a59650..a254e9253 100644 --- a/salt/strelka/filestream/config.sls +++ b/salt/strelka/filestream/config.sls @@ -47,6 +47,14 @@ filestream_config: FILESTREAMCONFIG: {{ STRELKAMERGED.filestream.config }} # Filecheck Section +remove_old_watchdog: + pkg.removed: + - name: python3-watchdog + +install_watchdog: + pkg.installed: + - name: securityonion-python39-watchdog + filecheck_logdir: file.directory: - name: /opt/so/log/strelka From ab1d97c985130bb3504ec3eee4ea330953cdb595 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 25 Aug 2023 09:39:16 -0400 Subject: [PATCH 010/417] restart filecheck if watchdog pkg changes --- salt/strelka/filestream/config.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/strelka/filestream/config.sls 
b/salt/strelka/filestream/config.sls index a254e9253..a84ab5ba1 100644 --- a/salt/strelka/filestream/config.sls +++ b/salt/strelka/filestream/config.sls @@ -135,6 +135,7 @@ filecheck_restart: - onchanges: - file: filecheck_script - file: filecheck_conf + - pkg: install_watchdog filcheck_history_clean: cron.present: From 0a88c812e867b51d19eb643d47dab1f9f7c24df3 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 25 Aug 2023 13:03:33 -0400 Subject: [PATCH 011/417] different watchdog package names for debian vs redhat fams --- salt/strelka/filestream/config.sls | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/strelka/filestream/config.sls b/salt/strelka/filestream/config.sls index a84ab5ba1..833a08505 100644 --- a/salt/strelka/filestream/config.sls +++ b/salt/strelka/filestream/config.sls @@ -47,6 +47,12 @@ filestream_config: FILESTREAMCONFIG: {{ STRELKAMERGED.filestream.config }} # Filecheck Section +{% if GLOBALS.os_family == 'Debian' %} +install_watchdog: + pkg.installed: + - name: python3-watchdog + +{% elif GLOBALS.os_family == 'RedHat' %} remove_old_watchdog: pkg.removed: - name: python3-watchdog @@ -54,6 +60,7 @@ remove_old_watchdog: install_watchdog: pkg.installed: - name: securityonion-python39-watchdog +{% endif %} filecheck_logdir: file.directory: From c22f9687fb1f23f5232c1a21e4dfa59555def7ec Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 25 Aug 2023 13:40:34 -0400 Subject: [PATCH 012/417] sync local repo in soup --- salt/manager/tools/sbin/soup | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 934cef2ee..21933c1a8 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -303,6 +303,7 @@ check_log_size_limit() { check_os_updates() { # Check to see if there are OS updates + echo "Checking for OS updates." NEEDUPDATES="We have detected missing operating system (OS) updates. Do you want to install these OS updates now?
This could take a while depending on the size of your grid and how many packages are missing, but it is recommended to keep your system updated." OSUPDATES=$(dnf -q list updates | grep -v docker | grep -v containerd | grep -v salt | grep -v Available | wc -l) if [[ "$OSUPDATES" -gt 0 ]]; then @@ -437,6 +438,11 @@ post_to_2.4.20() { POSTVERSION=2.4.20 } +repo_sync() { + echo "Sync the local repo." + su socore -c '/usr/sbin/so-repo-sync' +} + stop_salt_master() { # kill all salt jobs across the grid because the hang indefinitely if they are queued and salt-master restarts set +e @@ -762,9 +768,7 @@ main() { fi echo "Verifying we have the latest soup script." verify_latest_update_script - echo "Checking for OS updates." - check_os_updates - + echo "Let's see if we need to update Security Onion." upgrade_check upgrade_space @@ -776,6 +780,10 @@ main() { if [[ $is_airgap -eq 0 ]]; then yum clean all check_os_updates + elif [[ $OS == 'oracle' || $OS == 'redhat'|| $OS == 'centos' ]]; then + # sync remote repo down to local if not airgap + repo_sync + check_os_updates fi if [ "$is_hotfix" == "true" ]; then From 388c90f64113af0f750fec4aa091bda4064571b0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 25 Aug 2023 14:56:42 -0400 Subject: [PATCH 013/417] add oel to set_os --- salt/common/tools/sbin/so-common | 4 ++++ salt/manager/tools/sbin/soup | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index a76aab1f1..03b19d756 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -446,6 +446,10 @@ set_os() { OS=centos OSVER=9 is_centos=true + elif grep -q "Oracle Linux Server release 9" /etc/system-release; then + OS=oel + OSVER=9 + is_oracle=true fi cron_service_name="crond" else diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 21933c1a8..5cb59d6ac 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ 
-780,7 +780,7 @@ main() { if [[ $is_airgap -eq 0 ]]; then yum clean all check_os_updates - elif [[ $OS == 'oracle' || $OS == 'redhat'|| $OS == 'centos' ]]; then + elif [[ $OS == 'oel' || $OS == 'rocky'|| $OS == 'centos' ]]; then # sync remote repo down to local if not airgap repo_sync check_os_updates From 022ee36bca46ae016b0e14868dfcf1cf726c68dd Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 25 Aug 2023 16:44:03 -0400 Subject: [PATCH 014/417] ingest pfsense sample data --- salt/common/tools/sbin/so-test | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/common/tools/sbin/so-test b/salt/common/tools/sbin/so-test index 8d6bcf4e1..90309766b 100755 --- a/salt/common/tools/sbin/so-test +++ b/salt/common/tools/sbin/so-test @@ -5,4 +5,10 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +set -e + +# Playback live sample data onto monitor interface so-tcpreplay /opt/samples/* 2> /dev/null + +# Ingest sample pfsense log entry +echo "<134>$(date '+%b %d %H:%M:%S') filterlog[31624]: 84,,,1567509287,igb0.244,match,pass,in,4,0x0,,64,0,0,DF,6,tcp,64,192.168.1.1,10.10.10.10,56320,443,0,S,3333585167,,65535,,mss;nop;wscale;nop;nop;TS;sackOK;eol" | nc -uv -w1 localhost 514 From 5879eeabfa12a370feed8b7a462ed36ee379230e Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 25 Aug 2023 16:45:31 -0400 Subject: [PATCH 015/417] ingest pfsense sample data --- salt/common/tools/sbin/so-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-test b/salt/common/tools/sbin/so-test index 90309766b..7286a35a8 100755 --- a/salt/common/tools/sbin/so-test +++ b/salt/common/tools/sbin/so-test @@ -11,4 +11,4 @@ set -e so-tcpreplay /opt/samples/* 2> /dev/null # Ingest sample pfsense log entry -echo "<134>$(date '+%b %d %H:%M:%S') filterlog[31624]: 
84,,,1567509287,igb0.244,match,pass,in,4,0x0,,64,0,0,DF,6,tcp,64,192.168.1.1,10.10.10.10,56320,443,0,S,3333585167,,65535,,mss;nop;wscale;nop;nop;TS;sackOK;eol" | nc -uv -w1 localhost 514 +echo "<134>$(date '+%b %d %H:%M:%S') filterlog[31624]: 84,,,1567509287,igb0.244,match,pass,in,4,0x0,,64,0,0,DF,6,tcp,64,192.168.1.1,10.10.10.10,56320,443,0,S,3333585167,,65535,,mss;nop;wscale;nop;nop;TS;sackOK;eol" | nc -uv -w1 localhost 514 > /dev/null 2>&1 From 1ef4d2cde11d581dd5b3f871460306f554f90a0c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 28 Aug 2023 09:37:45 -0400 Subject: [PATCH 016/417] dont need to repo_sync rocky or centos --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 5cb59d6ac..37c9b3ba5 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -780,7 +780,7 @@ main() { if [[ $is_airgap -eq 0 ]]; then yum clean all check_os_updates - elif [[ $OS == 'oel' || $OS == 'rocky'|| $OS == 'centos' ]]; then + elif [[ $OS == 'oel' ]]; then # sync remote repo down to local if not airgap repo_sync check_os_updates From a8ec3717c44d1fd76343b321babaa7e44ab64bea Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 28 Aug 2023 10:20:53 -0400 Subject: [PATCH 017/417] fail soup if so-repo-sync fails --- salt/manager/tools/sbin/so-repo-sync | 4 +++- salt/manager/tools/sbin/soup | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/so-repo-sync b/salt/manager/tools/sbin/so-repo-sync index 3e129cd0d..84384fcdf 100644 --- a/salt/manager/tools/sbin/so-repo-sync +++ b/salt/manager/tools/sbin/so-repo-sync @@ -11,6 +11,8 @@ set_version set_os salt_minion_count +set -e + curl --retry 5 --retry-delay 60 -A "reposync/$VERSION/$OS/$(uname -r)/$MINIONCOUNT" https://sigs.securityonion.net/checkup --output /tmp/checkup dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf 
--repoid=securityonionsync --download-metadata -p /nsm/repo/ -createrepo /nsm/repo \ No newline at end of file +createrepo /nsm/repo diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 37c9b3ba5..45e3df530 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -440,7 +440,7 @@ post_to_2.4.20() { repo_sync() { echo "Sync the local repo." - su socore -c '/usr/sbin/so-repo-sync' + su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." } stop_salt_master() { From c10e686ec6f91d55bc53c8bd3b73c7f431b77bb9 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 28 Aug 2023 11:07:28 -0400 Subject: [PATCH 018/417] fix path to intermediate ca cert on heavy nodes --- salt/redis/enabled.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/redis/enabled.sls b/salt/redis/enabled.sls index 4c452bec0..27177d217 100644 --- a/salt/redis/enabled.sls +++ b/salt/redis/enabled.sls @@ -33,7 +33,7 @@ so-redis: {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %} - /etc/pki/ca.crt:/certs/ca.crt:ro {% else %} - - /etc/pki/certs/intca.crt:/certs/ca.crt:ro + - /etc/pki/tls/certs/intca.crt:/certs/ca.crt:ro {% endif %} {% if DOCKER.containers['so-redis'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-redis'].custom_bind_mounts %} From 6b0fbe4634609603fdbddcc86d7eeea96e406a3b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 28 Aug 2023 11:53:45 -0400 Subject: [PATCH 019/417] include so-repo-sync in soup_manager_scripts state --- salt/common/soup_scripts.sls | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/common/soup_scripts.sls b/salt/common/soup_scripts.sls index 8dff85ddb..041649200 100644 --- a/salt/common/soup_scripts.sls +++ b/salt/common/soup_scripts.sls @@ -19,4 +19,5 @@ soup_manager_scripts: - source: salt://manager/tools/sbin - include_pat: - so-firewall - - soup \ No newline at end of file + - so-repo-sync + - 
soup From bd61ee22be5ea6e0568505f4dc1381322efd70fe Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Mon, 28 Aug 2023 14:41:06 -0400 Subject: [PATCH 020/417] Update defaults.map.jinja --- salt/soc/defaults.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/defaults.map.jinja b/salt/soc/defaults.map.jinja index 7720e7027..2587051c5 100644 --- a/salt/soc/defaults.map.jinja +++ b/salt/soc/defaults.map.jinja @@ -16,7 +16,7 @@ {# add nodes from the logstash:nodes pillar to soc.server.modules.elastic.remoteHostUrls #} {% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %} {% for m in minions.keys() %} -{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append(m) %} +{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %} {% endfor %} {% endfor %} From 1c3d3d703ce4baf6e445721afd8facd53905c232 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 29 Aug 2023 08:56:01 -0400 Subject: [PATCH 021/417] add desktop.map.jinja for global vars --- salt/vars/desktop.map.jinja | 1 + 1 file changed, 1 insertion(+) create mode 100644 salt/vars/desktop.map.jinja diff --git a/salt/vars/desktop.map.jinja b/salt/vars/desktop.map.jinja new file mode 100644 index 000000000..964f69663 --- /dev/null +++ b/salt/vars/desktop.map.jinja @@ -0,0 +1 @@ +{% set ROLE_GLOBALS = {} %} From a1b1294247d2464b66a56a22a400bb7341daff1c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 29 Aug 2023 09:05:01 -0400 Subject: [PATCH 022/417] desktop doesnt need docker state --- salt/logrotate/init.sls | 1 + salt/top.sls | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/logrotate/init.sls b/salt/logrotate/init.sls index 1b096f9db..bdfc3b86c 100644 --- a/salt/logrotate/init.sls +++ b/salt/logrotate/init.sls @@ -3,6 +3,7 @@ logrotateconfdir: file.directory: - name: /opt/so/conf/logrotate + - makedirs: True commonlogrotatescript: file.managed: diff --git a/salt/top.sls 
b/salt/top.sls index 2323731a1..4a605b13c 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -28,12 +28,12 @@ base: - motd - salt.minion-check - salt.lasthighstate - - docker 'not *_desktop and G@saltversion:{{saltversion}}': - match: compound - common - + - docker + '*_sensor and G@saltversion:{{saltversion}}': - match: compound - sensor From 67ea7d31e110da56301dda1f49b84df7f1888df8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 29 Aug 2023 09:32:10 -0400 Subject: [PATCH 023/417] dont exec so-setup desktop --- setup/so-functions | 4 +--- setup/so-whiptail | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index fc0876248..9f7e61fa1 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1664,9 +1664,7 @@ process_installtype() { elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true elif [ "$install_type" = 'DESKTOP' ]; then - if [ "$setup_type" != 'desktop' ]; then - exec bash so-setup desktop - fi + is_desktop=true fi } diff --git a/setup/so-whiptail b/setup/so-whiptail index c55e2db8f..01c0ffde9 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -678,9 +678,7 @@ whiptail_install_type_dist_existing() { elif [ "$install_type" = 'RECEIVER' ]; then is_receiver=true elif [ "$install_type" = 'DESKTOP' ]; then - if [ "$setup_type" != 'desktop' ]; then - exec bash so-setup desktop - fi + is_desktop=true fi local exitstatus=$? 
From 532b2c222a1180663dbc5420fe7cbd157c7c49d9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 29 Aug 2023 10:16:51 -0400 Subject: [PATCH 024/417] edit other/desktop install whiptail --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 01c0ffde9..702949813 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -692,7 +692,7 @@ whiptail_install_type_other() { install_type=$(whiptail --title "$whiptail_title" --menu \ "Choose node type:" 10 65 2 \ - "DESKTOP" "Setup will run 'so-setup desktop' " 3>&1 1>&2 2>&3) + "DESKTOP" 3>&1 1>&2 2>&3) local exitstatus=$? whiptail_check_exitstatus $exitstatus From 0455063a39500fad28ba6fb8c0521244211dd01c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 29 Aug 2023 10:26:29 -0400 Subject: [PATCH 025/417] edit other/desktop install whiptail --- setup/so-whiptail | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 702949813..6188406cb 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -692,7 +692,7 @@ whiptail_install_type_other() { install_type=$(whiptail --title "$whiptail_title" --menu \ "Choose node type:" 10 65 2 \ - "DESKTOP" 3>&1 1>&2 2>&3) + "DESKTOP" "Install Security Onion Desktop " 3>&1 1>&2 2>&3) local exitstatus=$? 
whiptail_check_exitstatus $exitstatus From d40bbf6b090fce2fc922e6cf7d5e1f078195da46 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 29 Aug 2023 10:59:40 -0400 Subject: [PATCH 026/417] Add Apache templates --- salt/elasticsearch/defaults.yaml | 36 ++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 3ea24c3fd..8ae75f984 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -286,6 +286,42 @@ elasticsearch: data_stream: hidden: false allow_custom_routing: false + so-logs-apache_x_access: + index_sorting: False + index_template: + index_patterns: + - "logs-apache.access-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-apache.access@package" + - "logs-apache.access@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-apache_x_error: + index_sorting: False + index_template: + index_patterns: + - "logs-apache.error-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-apache.error@package" + - "logs-apache.error@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false so-logs-auditd_x_log: index_sorting: False index_template: From f118e25e8c8e424cd80110f04eeac77e4470aa95 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 29 Aug 2023 11:00:31 -0400 Subject: [PATCH 027/417] Add Apache references --- salt/elasticsearch/soc_elasticsearch.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index a960facd1..1823337b5 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -201,7 +201,8 @@ elasticsearch: 
so-logs-windows_x_powershell: *indexSettings so-logs-windows_x_powershell_operational: *indexSettings so-logs-windows_x_sysmon_operational: *indexSettings - so-logs-auditd_x_log: *indexSettings + so-logs-apache_x_access: *indexSettings + so-logs-apache_x_error: *indexSettings so-logs-aws_x_cloudtrail: *indexSettings so-logs-aws_x_cloudwatch_logs: *indexSettings so-logs-aws_x_ec2_logs: *indexSettings From c01a9006a6609a0d928bea0c640fe997d42c415e Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 29 Aug 2023 11:01:22 -0400 Subject: [PATCH 028/417] Add Apache package --- salt/elasticfleet/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 77fa9dd31..55e70113f 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -26,6 +26,7 @@ elasticfleet: - stderr - stdout packages: + - apache - auditd - aws - azure From d2063c7e119d77f31139c6c9c94a5e5de1f18b3e Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 29 Aug 2023 11:14:49 -0400 Subject: [PATCH 029/417] Add auditd reference back --- salt/elasticsearch/soc_elasticsearch.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 1823337b5..e4de29e00 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -203,6 +203,7 @@ elasticsearch: so-logs-windows_x_sysmon_operational: *indexSettings so-logs-apache_x_access: *indexSettings so-logs-apache_x_error: *indexSettings + so-logs-auditd_x_log: *indexSettings so-logs-aws_x_cloudtrail: *indexSettings so-logs-aws_x_cloudwatch_logs: *indexSettings so-logs-aws_x_ec2_logs: *indexSettings From a4dc48237215eb3f87e377a28d5077f08be915ba Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 29 Aug 2023 13:10:06 -0400 Subject: [PATCH 030/417] add is_desktop_grid var --- setup/so-functions | 2 +- setup/so-whiptail | 2 ++ 2 files changed, 3 
insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 9f7e61fa1..4e105dcd6 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1883,7 +1883,7 @@ securityonion_repo() { if [ -n "$(ls -A /etc/yum.repos.d/ 2>/dev/null)" ]; then logCmd "mv -v /etc/yum.repos.d/* /root/oldrepos/" fi - if [[ $is_desktop_iso ]]; then + if [[ ! $is_desktop_grid ]]; then gpg_rpm_import if [[ ! $is_airgap ]]; then echo "https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9" > /etc/yum/mirror.txt diff --git a/setup/so-whiptail b/setup/so-whiptail index 6188406cb..62f60a84a 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -58,6 +58,8 @@ whiptail_desktop_install() { whiptail --title "$whiptail_title" \ --yesno "$message" 11 75 --defaultno + is_desktop_grid=$? + } whiptail_desktop_nongrid_iso() { From 706a6e2d56ca045f97e2393d270e01a406334928 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 30 Aug 2023 08:34:04 -0400 Subject: [PATCH 031/417] Make sure a data stream is created for syslog --- salt/elasticsearch/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 8ae75f984..33362825f 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -4187,6 +4187,7 @@ elasticsearch: so-syslog: index_sorting: False index_template: + data_stream: {} index_patterns: - logs-syslog-so* template: From ce05f29dc4e436060a05cf02adfe3aa9578e3ee6 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 30 Aug 2023 13:03:28 +0000 Subject: [PATCH 032/417] Add port_bindings for port 514 --- salt/docker/defaults.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/docker/defaults.yaml b/salt/docker/defaults.yaml index e39feaf06..a5d6c5d6d 100644 --- a/salt/docker/defaults.yaml +++ b/salt/docker/defaults.yaml @@ -178,6 +178,9 @@ docker: extra_env: [] 'so-elastic-agent': final_octet: 46 + port_bindings: + - 0.0.0.0:514:514/tcp + - 0.0.0.0:514:514/udp 
custom_bind_mounts: [] extra_hosts: [] extra_env: [] From 655eea2b007124d9abe0674b5281435817cf290d Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 30 Aug 2023 13:03:56 +0000 Subject: [PATCH 033/417] Add port_bindings --- salt/elasticagent/enabled.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/elasticagent/enabled.sls b/salt/elasticagent/enabled.sls index 963b8549b..7d0f401e9 100644 --- a/salt/elasticagent/enabled.sls +++ b/salt/elasticagent/enabled.sls @@ -31,6 +31,10 @@ so-elastic-agent: - {{ XTRAHOST }} {% endfor %} {% endif %} + - port_bindings: + {% for BINDING in DOCKER.containers['so-elastic-agent'].port_bindings %} + - {{ BINDING }} + {% endfor %} - binds: - /opt/so/conf/elastic-agent/elastic-agent.yml:/usr/share/elastic-agent/elastic-agent.yml:ro - /opt/so/log/elasticagent:/usr/share/elastic-agent/logs From 0e22acc255cc62af53810156eabaf9471d8bbcae Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 30 Aug 2023 13:04:32 +0000 Subject: [PATCH 034/417] Add tcp and udp integration --- .../files/elastic-agent.yml.jinja | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/salt/elasticagent/files/elastic-agent.yml.jinja b/salt/elasticagent/files/elastic-agent.yml.jinja index 92aacfa44..7d0b93344 100644 --- a/salt/elasticagent/files/elastic-agent.yml.jinja +++ b/salt/elasticagent/files/elastic-agent.yml.jinja @@ -430,3 +430,54 @@ inputs: exclude_files: - >- broker|capture_loss|cluster|ecat_arp_info|known_hosts|known_services|loaded_scripts|ntp|ocsp|packet_filter|reporter|stats|stderr|stdout.log$ + - id: udp-udp-35051de0-46a5-11ee-8d5d-9f98c8182f60 + name: syslog-udp-514 + revision: 3 + type: udp + use_output: default + meta: + package: + name: udp + version: 1.10.0 + data_stream: + namespace: so + package_policy_id: 35051de0-46a5-11ee-8d5d-9f98c8182f60 + streams: + - id: udp-udp.generic-35051de0-46a5-11ee-8d5d-9f98c8182f60 + data_stream: + dataset: syslog + pipeline: syslog + host: '0.0.0.0:514' + max_message_size: 10KiB + processors: + - 
add_fields: + fields: + module: syslog + target: event + tags: + - syslog + - id: tcp-tcp-33d37bb0-46a5-11ee-8d5d-9f98c8182f60 + name: syslog-tcp-514 + revision: 3 + type: tcp + use_output: default + meta: + package: + name: tcp + version: 1.10.0 + data_stream: + namespace: so + package_policy_id: 33d37bb0-46a5-11ee-8d5d-9f98c8182f60 + streams: + - id: tcp-tcp.generic-33d37bb0-46a5-11ee-8d5d-9f98c8182f60 + data_stream: + dataset: syslog + pipeline: syslog + host: '0.0.0.0:514' + processors: + - add_fields: + fields: + module: syslog + target: event + tags: + - syslog From 60b0af5ab793fa1b6a592743f162e6905b797798 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 30 Aug 2023 13:05:30 +0000 Subject: [PATCH 035/417] Allow external syslog --- salt/firewall/defaults.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index ff127c419..ecb4bad6b 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -1141,6 +1141,12 @@ firewall: localhost: portgroups: - all + self: + portgroups: + - syslog + syslog: + portgroups: + - syslog customhostgroup0: portgroups: [] customhostgroup1: From ae01da780e242ddc084e3fb7907a952d0599cb88 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 30 Aug 2023 09:10:59 -0400 Subject: [PATCH 036/417] desktop network install nongrid --- setup/so-functions | 2 +- setup/so-setup | 61 +++++++++++++++++++++++----------------------- setup/so-whiptail | 6 ++++- 3 files changed, 37 insertions(+), 32 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 4e105dcd6..1c0cad2a7 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1883,7 +1883,7 @@ securityonion_repo() { if [ -n "$(ls -A /etc/yum.repos.d/ 2>/dev/null)" ]; then logCmd "mv -v /etc/yum.repos.d/* /root/oldrepos/" fi - if [[ ! $is_desktop_grid ]]; then + if ! $is_desktop_grid; then gpg_rpm_import if [[ ! 
$is_airgap ]]; then echo "https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9" > /etc/yum/mirror.txt diff --git a/setup/so-setup b/setup/so-setup index b946c06c8..61c0d88e3 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -333,38 +333,39 @@ if [[ $is_desktop ]]; then exit 1 fi - if ! whiptail_desktop_install; then - if [[ $is_desktop_iso ]]; then - if whiptail_desktop_nongrid_iso; then - # Remove setup from auto launching - parse_install_username - sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1 - securityonion_repo - info "Enabling graphical interface and setting it to load at boot" - systemctl set-default graphical.target - info "Setting desktop background" - set_desktop_background - echo "Desktop Install Complete!" - echo "" - echo "Please reboot to start graphical interface." - exit 0 + whiptail_desktop_install + if ! is_desktop_grid; then + if [[ $is_desktop_iso ]]; then + if whiptail_desktop_nongrid_iso; then + # Remove setup from auto launching + parse_install_username + sed -i '$ d' /home/$INSTALLUSERNAME/.bash_profile >> "$setup_log" 2>&1 + securityonion_repo + info "Enabling graphical interface and setting it to load at boot" + systemctl set-default graphical.target + info "Setting desktop background" + set_desktop_background + echo "Desktop Install Complete!" + echo "" + echo "Please reboot to start graphical interface." + exit 0 + else + # Abort! + exit 0 + fi else - # Abort! - exit 0 + if whiptail_desktop_nongrid_network; then + info "" + info "" + info "Kicking off the automated setup of the Security Onion Desktop. This can take a while depending on your network connection." + info "" + info "" + desktop_salt_local + else + # Abort! + exit 0 + fi fi - else - if whiptail_desktop_nongrid_network; then - info "" - info "" - info "Kicking off the automated setup of the Security Onion Desktop. This can take a while depending on your network connection." - info "" - info "" - desktop_salt_local - else - # Abort! 
- exit 0 - fi - fi fi # If you got this far then you want to join the grid diff --git a/setup/so-whiptail b/setup/so-whiptail index 62f60a84a..8fd3b5fdd 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -58,7 +58,11 @@ whiptail_desktop_install() { whiptail --title "$whiptail_title" \ --yesno "$message" 11 75 --defaultno - is_desktop_grid=$? + if [ $? -eq 0 ]; then + is_desktop_grid=true + else + is_desktop_grid=false + fi } From 97587064f8dd55b427d362c76748e73c581ce936 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 30 Aug 2023 09:48:52 -0400 Subject: [PATCH 037/417] remove packages from nongrid desktop install --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 1c0cad2a7..eab98b849 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -91,7 +91,7 @@ desktop_salt_local() { securityonion_repo gpg_rpm_import # Install salt - logCmd "yum -y install salt-minion-$SALTVERSION httpd-tools python3 python36-docker python36-dateutil python36-m2crypto python36-mysql python36-packaging python36-lxml yum-utils device-mapper-persistent-data lvm2 openssl jq" + logCmd "yum -y install salt-minion-$SALTVERSION httpd-tools python3 python3-dateutil yum-utils device-mapper-persistent-data lvm2 openssl jq" logCmd "yum -y update --exclude=salt*" logCmd "salt-call state.apply desktop --local --file-root=../salt/ -l info" From a3eeba4761991448fbaf3eb0ae48383e5681a3dc Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 30 Aug 2023 09:51:09 -0400 Subject: [PATCH 038/417] do networking_needful for nongrid desktop network install --- setup/so-setup | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-setup b/setup/so-setup index 61c0d88e3..de117331d 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -355,6 +355,7 @@ if [[ $is_desktop ]]; then fi else if whiptail_desktop_nongrid_network; then + networking_needful info "" info "" info "Kicking off the automated setup of the Security 
Onion Desktop. This can take a while depending on your network connection." From 8381fa1d4220d0943eb5efc703c8040bf505d923 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 30 Aug 2023 10:26:24 -0400 Subject: [PATCH 039/417] cant import globals because of nongrid desktop install~ --- salt/desktop/xwindows.sls | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/salt/desktop/xwindows.sls b/salt/desktop/xwindows.sls index b18109d45..66e4c9a05 100644 --- a/salt/desktop/xwindows.sls +++ b/salt/desktop/xwindows.sls @@ -1,7 +1,5 @@ -{% from 'vars/globals.map.jinja' import GLOBALS %} - {# we only want this state to run it is CentOS #} -{% if GLOBALS.os == 'OEL' %} +{% if grains.os == 'OEL' %} include: - desktop.packages From b14614ae53f055cb75be9fab8744dfcf7a811639 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 30 Aug 2023 10:32:13 -0400 Subject: [PATCH 040/417] need $ for vars --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index de117331d..f6e5c8c4e 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -334,7 +334,7 @@ if [[ $is_desktop ]]; then fi whiptail_desktop_install - if ! is_desktop_grid; then + if ! 
$is_desktop_grid; then if [[ $is_desktop_iso ]]; then if whiptail_desktop_nongrid_iso; then # Remove setup from auto launching From b45e114ef25389be5fb97d1a8dc896e8b872d887 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 30 Aug 2023 10:41:34 -0400 Subject: [PATCH 041/417] cant use GLOBALS var due to desktop nongrid install --- salt/desktop/packages.sls | 4 +--- salt/desktop/remove_gui.sls | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/salt/desktop/packages.sls b/salt/desktop/packages.sls index 524c2c266..3817f2e80 100644 --- a/salt/desktop/packages.sls +++ b/salt/desktop/packages.sls @@ -1,7 +1,5 @@ -{% from 'vars/globals.map.jinja' import GLOBALS %} - {# we only want this state to run it is CentOS #} -{% if GLOBALS.os == 'OEL' %} +{% if grains.os == 'OEL' %} desktop_packages: pkg.installed: diff --git a/salt/desktop/remove_gui.sls b/salt/desktop/remove_gui.sls index 53d927cbe..d8de07a9a 100644 --- a/salt/desktop/remove_gui.sls +++ b/salt/desktop/remove_gui.sls @@ -1,7 +1,5 @@ -{% from 'vars/globals.map.jinja' import GLOBALS %} - {# we only want this state to run it is CentOS #} -{% if GLOBALS.os == 'OEL' %} +{% if grains.os == 'OEL' %} remove_graphical_target: file.symlink: From fe690922de38a2d6ef9dd9da8afad312e39ad97e Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 30 Aug 2023 19:16:05 +0000 Subject: [PATCH 042/417] Add analyzer configuration to the defaults file --- salt/sensoroni/defaults.yaml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/salt/sensoroni/defaults.yaml b/salt/sensoroni/defaults.yaml index 4ccc11ce9..f53646ac2 100644 --- a/salt/sensoroni/defaults.yaml +++ b/salt/sensoroni/defaults.yaml @@ -8,3 +8,31 @@ sensoroni: node_checkin_interval_ms: 10000 sensoronikey: soc_host: + analyzers: + emailrep: + base_url: https://emailrep.io/ + api_key: + greynoise: + base_url: https://api.greynoise.io/ + api_key: + api_version: community + localfile: + file_path: [] + otx: + base_url: 
https://otx.alienvault.com/api/v1/ + api_key: + pulsedive: + base_url: https://pulsedive.com/api/ + api_key: + spamhaus: + lookup_host: zen.spamhaus.org + nameservers: [] + urlscan: + base_url: https://urlscan.io/api/v1/ + api_key: + enabled: False + visibility: public + timeout: 180 + virustotal: + base_url: https://www.virustotal.com/api/v3/search?query= + api_key: From 8cc19b0748c6804abe01abb8f7dd3df9dc23784f Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 30 Aug 2023 19:16:38 +0000 Subject: [PATCH 043/417] Add analyzer configuration description --- salt/sensoroni/soc_sensoroni.yaml | 142 ++++++++++++++++++++++++++++++ 1 file changed, 142 insertions(+) diff --git a/salt/sensoroni/soc_sensoroni.yaml b/salt/sensoroni/soc_sensoroni.yaml index 8a35272ea..6a728ef9c 100644 --- a/salt/sensoroni/soc_sensoroni.yaml +++ b/salt/sensoroni/soc_sensoroni.yaml @@ -37,3 +37,145 @@ sensoroni: helpLink: sensoroni.html global: True advanced: True + analyzers: + emailrep: + api_key: + description: API key for the EmailRep analyzer. + helpLink: sensoroni.html + global: True + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the EmailRep analyzer. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string + greynoise: + api_key: + description: API key for the GreyNoise analyzer. + helpLink: sensoroni.html + global: True + sensitive: True + advanced: True + forcedType: string + api_version: + description: API key for the GreyNoise analyzer. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string + base_url: + description: Base URL for the GreyNoise analyzer. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string + localfile: + file_path: + description: File path for the LocalFile analyzer. 
+ helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: "[]string" + otx: + api_key: + description: API key for the OTX analyzer. + helpLink: sensoroni.html + global: True + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the OTX analyzer. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string + pulsedive: + api_key: + description: API key for the Pulsedive analyzer. + helpLink: sensoroni.html + global: True + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the Pulsedive analyzer. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string + spamhaus: + lookup_host: + description: Host to use for lookups. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string + nameservers: + description: Nameservers used for queries. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedTypes: "[]string" + urlscan: + api_key: + description: API key for the Urlscan analyzer. + helpLink: sensoroni.html + global: True + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the Urlscan analyzer. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string + enabled: + description: Analyzer enabled + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: bool + timeout: + description: Timeout for the Urlscan analyzer. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: int + visibility: + description: Type of visibility. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string + virustotal: + api_key: + description: API key for the VirusTotal analyzer. 
+ helpLink: sensoroni.html + global: True + sensitive: True + advanced: True + forcedType: string + base_url: + description: Base URL for the VirusTotal analyzer. + helpLink: sensoroni.html + global: True + sensitive: False + advanced: True + forcedType: string From 78915f900b8aad6ebb9e4038ec1f4b0ad916add6 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 30 Aug 2023 15:37:30 -0400 Subject: [PATCH 044/417] Add fortigate package --- salt/elasticfleet/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 55e70113f..979e795f7 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -41,6 +41,7 @@ elasticfleet: - fleet_server - fim - fortinet + - fortinet_fortigate - gcp - github - google_workspace From d090852895fb899fb9d029c57ae2c54e879a9722 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 30 Aug 2023 15:40:40 -0400 Subject: [PATCH 045/417] Correct fortigate template name --- salt/elasticsearch/defaults.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 33362825f..cc2f5e1cd 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -970,18 +970,18 @@ elasticsearch: data_stream: hidden: false allow_custom_routing: false - so-logs-fortinet_x_fortigate: + so-logs-fortinet_fortigate_x_log: index_sorting: False index_template: index_patterns: - - "logs-fortinet.fortigate-*" + - "logs-fortinet_fortigate.log-*" template: settings: index: number_of_replicas: 0 composed_of: - - "logs-fortinet.fortigate@package" - - "logs-fortinet.fortigate@custom" + - "logs-fortinet_fortigate.log@package" + - "logs-fortinet_fortigate.log@custom" - "so-fleet_globals-1" - "so-fleet_agent_id_verification-1" priority: 501 From 21e91a753701b2672fedc9a5025982b05138fb6b Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 30 Aug 2023 16:10:38 -0400 
Subject: [PATCH 046/417] Fix api_version --- salt/sensoroni/soc_sensoroni.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/sensoroni/soc_sensoroni.yaml b/salt/sensoroni/soc_sensoroni.yaml index 6a728ef9c..2d1536191 100644 --- a/salt/sensoroni/soc_sensoroni.yaml +++ b/salt/sensoroni/soc_sensoroni.yaml @@ -62,7 +62,7 @@ sensoroni: advanced: True forcedType: string api_version: - description: API key for the GreyNoise analyzer. + description: API version for the GreyNoise analyzer. helpLink: sensoroni.html global: True sensitive: False From 41300af944c1c537ef9bf99bb2411d80bafdbb4e Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 30 Aug 2023 16:30:32 -0400 Subject: [PATCH 047/417] Set global to false --- salt/sensoroni/soc_sensoroni.yaml | 38 +++++++++++++++---------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/salt/sensoroni/soc_sensoroni.yaml b/salt/sensoroni/soc_sensoroni.yaml index 2d1536191..eb63dbe25 100644 --- a/salt/sensoroni/soc_sensoroni.yaml +++ b/salt/sensoroni/soc_sensoroni.yaml @@ -42,14 +42,14 @@ sensoroni: api_key: description: API key for the EmailRep analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the EmailRep analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string @@ -57,21 +57,21 @@ sensoroni: api_key: description: API key for the GreyNoise analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: True advanced: True forcedType: string api_version: description: API version for the GreyNoise analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string base_url: description: Base URL for the GreyNoise analyzer. 
helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string @@ -79,7 +79,7 @@ sensoroni: file_path: description: File path for the LocalFile analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: "[]string" @@ -87,14 +87,14 @@ sensoroni: api_key: description: API key for the OTX analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the OTX analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string @@ -102,14 +102,14 @@ sensoroni: api_key: description: API key for the Pulsedive analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the Pulsedive analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string @@ -117,14 +117,14 @@ sensoroni: lookup_host: description: Host to use for lookups. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string nameservers: description: Nameservers used for queries. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedTypes: "[]string" @@ -132,35 +132,35 @@ sensoroni: api_key: description: API key for the Urlscan analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the Urlscan analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string enabled: description: Analyzer enabled helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: bool timeout: description: Timeout for the Urlscan analyzer. 
helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: int visibility: description: Type of visibility. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string @@ -168,14 +168,14 @@ sensoroni: api_key: description: API key for the VirusTotal analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the VirusTotal analyzer. helpLink: sensoroni.html - global: True + global: False sensitive: False advanced: True forcedType: string From 14a62805310e59d4e16f3b6809fa0d74860edb81 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 30 Aug 2023 16:49:17 -0400 Subject: [PATCH 048/417] iso desktop join grid - set install_type and minion_type --- setup/so-functions | 2 +- setup/so-setup | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index eab98b849..4f973d147 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1269,7 +1269,7 @@ get_redirect() { get_minion_type() { local minion_type case "$install_type" in - 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER') + 'EVAL' | 'MANAGERSEARCH' | 'MANAGER' | 'SENSOR' | 'HEAVYNODE' | 'SEARCHNODE' | 'FLEET' | 'IDH' | 'STANDALONE' | 'IMPORT' | 'RECEIVER' | 'DESKTOP') minion_type=$(echo "$install_type" | tr '[:upper:]' '[:lower:]') ;; esac diff --git a/setup/so-setup b/setup/so-setup index f6e5c8c4e..cdc7e67d6 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -104,6 +104,7 @@ if [ "$setup_type" = 'desktop' ]; then # Check to see if this is an ISO. Usually this dir on exists on ISO installs. 
if [ -d /root/SecurityOnion ]; then is_desktop_iso=true + install_type='DESKTOP' fi fi From a615fc8e47f444ea9dc87390c897626b5b226216 Mon Sep 17 00:00:00 2001 From: Corey Ogburn Date: Wed, 30 Aug 2023 15:33:01 -0600 Subject: [PATCH 049/417] New Config Default: longRelayTimeoutMs Salt is getting a second timeout for operations known to take a long time such as sending and importing files. There's also an entry in soc_soc.yaml so the value can be changed in SOC's config page. --- salt/soc/defaults.yaml | 1 + salt/soc/soc_soc.yaml | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index ff8b240ec..05543cd19 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1012,6 +1012,7 @@ soc: verifyCert: false salt: queueDir: /opt/sensoroni/queue + longRelayTimeoutMs: 120000 sostatus: refreshIntervalMs: 30000 offlineThresholdMs: 900000 diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index b2ed893f6..e94144069 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -111,6 +111,11 @@ soc: description: Duration (in milliseconds) that must elapse after a grid node fails to check-in before the node will be marked offline (fault). global: True advanced: True + salt: + longRelayTimeoutMs: + description: Duration (in milliseconds) to wait for a response from the Salt API when executing tasks known for being long running before giving up and showing an error on the SOC UI. + global: True + advanced: True client: enableReverseLookup: description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI. 
From c812c3991ef952eb3b9e472ea4b1353221e72695 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 31 Aug 2023 08:54:13 -0400 Subject: [PATCH 050/417] we dont need to run convert-gnome-classic script --- salt/desktop/scripts/convert-gnome-classic.sh | 4 ---- salt/desktop/xwindows.sls | 5 +---- 2 files changed, 1 insertion(+), 8 deletions(-) delete mode 100644 salt/desktop/scripts/convert-gnome-classic.sh diff --git a/salt/desktop/scripts/convert-gnome-classic.sh b/salt/desktop/scripts/convert-gnome-classic.sh deleted file mode 100644 index e69a43b2d..000000000 --- a/salt/desktop/scripts/convert-gnome-classic.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/bash -echo "Setting default session to gnome-classic" -cp /usr/share/accountsservice/user-templates/standard /etc/accountsservice/user-templates/ -sed -i 's|Session=gnome|Session=gnome-classic|g' /etc/accountsservice/user-templates/standard diff --git a/salt/desktop/xwindows.sls b/salt/desktop/xwindows.sls index 66e4c9a05..85da0590c 100644 --- a/salt/desktop/xwindows.sls +++ b/salt/desktop/xwindows.sls @@ -12,10 +12,7 @@ graphical_target: - require: - desktop_packages -convert_gnome_classic: - cmd.script: - - name: salt://desktop/scripts/convert-gnome-classic.sh - +{# set users to use gnome-classic #} {% for username in salt['file.find'](path='/home/',mindepth=1,maxdepth=1,type='d') %} {% set username = username.split('/')[2] %} {% if username != 'zeek' %} From da56a421e5b0a89c2fa7750249066762777191cd Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Thu, 31 Aug 2023 09:17:33 -0400 Subject: [PATCH 051/417] Update motd.md --- salt/soc/files/soc/motd.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/files/soc/motd.md b/salt/soc/files/soc/motd.md index cf22e863d..d6b0d3d27 100644 --- a/salt/soc/files/soc/motd.md +++ b/salt/soc/files/soc/motd.md @@ -1,6 +1,6 @@ ## Getting Started -New to Security Onion 2? 
Click the menu in the upper-right corner and you'll find links for [Help](/docs/) and a [Cheatsheet](/docs/cheatsheet.pdf) that will help you best utilize Security Onion to hunt for evil! In addition, check out our free Security Onion 2 Essentials online course, available on our [Training](https://securityonionsolutions.com/training) website. +New to Security Onion 2? Click the menu in the upper-right corner and you'll find links for [Help](/docs/) and a [Cheat Sheet](/docs/cheatsheet.pdf) that will help you best utilize Security Onion to hunt for evil! In addition, check out our free Security Onion 2 Essentials online course, available on our [Training](https://securityonionsolutions.com/training) website. If you're ready to dive in, take a look at the [Alerts](/#/alerts) interface to see what Security Onion has detected so far. Then go to the [Dashboards](/#/dashboards) interface for a general overview of all logs collected or go to the [Hunt](/#/hunt) interface for more focused threat hunting. Once you've found something of interest, escalate it to [Cases](/#/cases) to then collect evidence and analyze observables as you work towards closing the case. 
From a60c34d5488bc88fbbe9a6f817dffd3327eecf8e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 31 Aug 2023 09:40:54 -0400 Subject: [PATCH 052/417] exclude unnecessary pillars from desktop nodes --- pillar/top.sls | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index 4893c44f9..bf28b6474 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -2,21 +2,23 @@ base: '*': - global.soc_global - global.adv_global - - docker.soc_docker - - docker.adv_docker - - firewall.soc_firewall - - firewall.adv_firewall - - influxdb.token - logrotate.soc_logrotate - logrotate.adv_logrotate - - nginx.soc_nginx - - nginx.adv_nginx - - node_data.ips - ntp.soc_ntp - ntp.adv_ntp - patch.needs_restarting - patch.soc_patch - patch.adv_patch + + '* and not *_desktop': + - docker.soc_docker + - docker.adv_docker + - firewall.soc_firewall + - firewall.adv_firewall + - influxdb.token + - nginx.soc_nginx + - nginx.adv_nginx + - node_data.ips - sensoroni.soc_sensoroni - sensoroni.adv_sensoroni - telegraf.soc_telegraf From ee848b8a8c7c8b547d70885e5f03ea99a0f1e25e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 31 Aug 2023 09:51:55 -0400 Subject: [PATCH 053/417] comments for desktop install --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index cdc7e67d6..a9c7776c3 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -578,9 +578,9 @@ if ! 
[[ -f $install_opt_file ]]; then set_minion_info whiptail_end_settings + # desktop install will only get this far if joining the grid elif [[ $is_desktop ]]; then info "Setting up as node type desktop" - #check_requirements "desktop" networking_needful collect_mngr_hostname add_mngr_ip_to_hosts From 1a3b3b21fbd9dfbc0395659fb7183bea9f1c9d4d Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 31 Aug 2023 15:09:19 +0000 Subject: [PATCH 054/417] Change entropy value syntax --- salt/elasticsearch/files/ingest/strelka.file | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/files/ingest/strelka.file b/salt/elasticsearch/files/ingest/strelka.file index 741e20aa1..a74a7c622 100644 --- a/salt/elasticsearch/files/ingest/strelka.file +++ b/salt/elasticsearch/files/ingest/strelka.file @@ -63,8 +63,8 @@ { "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 50 && ctx.rule?.score <=69", "field": "event.severity", "value": 2, "override": true } }, { "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 70 && ctx.rule?.score <=89", "field": "event.severity", "value": 3, "override": true } }, { "set": { "if": "ctx.rule?.score != null && ctx.rule?.score >= 90", "field": "event.severity", "value": 4, "override": true } }, - { "set": { "if": "ctx.scan?.entropy?.entropy == 0", "field": "scan.entropy.entropy", "value": "0.0", "override": true } }, - { "set": { "if": "ctx.scan?.pe?.image_version == 0", "field": "scan.pe.image_version", "value": "0.0", "override": true } }, + { "set": { "if": "ctx.scan?.entropy?.entropy == '0'", "field": "scan.entropy.entropy", "value": "0.0", "override": true } }, + { "set": { "if": "ctx.scan?.pe?.image_version == '0'", "field": "scan.pe.image_version", "value": "0.0", "override": true } }, { "set": { "field": "observer.name", "value": "{{agent.name}}" }}, { "convert" : { "field" : "scan.exiftool","type": "string", "ignore_missing":true }}, { "remove": { "field": ["host", "path", "message", "exiftool", 
"scan.yara.meta"], "ignore_missing": true } }, From 0fed757b11c56af7106badc3ca5cb38786108b32 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 31 Aug 2023 15:10:27 +0000 Subject: [PATCH 055/417] Add entropy mapping --- .../templates/component/so/so-scan-mappings.json | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/salt/elasticsearch/templates/component/so/so-scan-mappings.json b/salt/elasticsearch/templates/component/so/so-scan-mappings.json index 60dc5b928..8ddbe6077 100644 --- a/salt/elasticsearch/templates/component/so/so-scan-mappings.json +++ b/salt/elasticsearch/templates/component/so/so-scan-mappings.json @@ -33,10 +33,17 @@ } } } - } + }, + "entropy": { + "properties": { + "entropy": { + "type": "float" + } + } + } } } } } } -} \ No newline at end of file +} From b010919099acda92e7473ab605a9e2ae60f2049c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 31 Aug 2023 13:21:32 -0400 Subject: [PATCH 056/417] add sensoroni, telegraf, common states to desktop. 
allow docker_registry connection to managers for desktop --- pillar/top.sls | 10 +++++----- salt/firewall/defaults.yaml | 9 +++++++++ salt/top.sls | 26 +++----------------------- 3 files changed, 17 insertions(+), 28 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index bf28b6474..9f21a2c99 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -2,6 +2,7 @@ base: '*': - global.soc_global - global.adv_global + - influxdb.token - logrotate.soc_logrotate - logrotate.adv_logrotate - ntp.soc_ntp @@ -9,20 +10,19 @@ base: - patch.needs_restarting - patch.soc_patch - patch.adv_patch + - sensoroni.soc_sensoroni + - sensoroni.adv_sensoroni + - telegraf.soc_telegraf + - telegraf.adv_telegraf '* and not *_desktop': - docker.soc_docker - docker.adv_docker - firewall.soc_firewall - firewall.adv_firewall - - influxdb.token - nginx.soc_nginx - nginx.adv_nginx - node_data.ips - - sensoroni.soc_sensoroni - - sensoroni.adv_sensoroni - - telegraf.soc_telegraf - - telegraf.adv_telegraf '*_manager or *_managersearch': - match: compound diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index ecb4bad6b..578a242f9 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -463,6 +463,9 @@ firewall: - endgame desktop: portgroups: + - docker_registry + - influxdb + - sensoroni - yum customhostgroup0: portgroups: [] @@ -651,6 +654,9 @@ firewall: - endgame desktop: portgroups: + - docker_registry + - influxdb + - sensoroni - yum customhostgroup0: portgroups: [] @@ -847,6 +853,9 @@ firewall: - strelka_frontend desktop: portgroups: + - docker_registry + - influxdb + - sensoroni - yum customhostgroup0: portgroups: [] diff --git a/salt/top.sls b/salt/top.sls index 4a605b13c..ccad30307 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -28,18 +28,18 @@ base: - motd - salt.minion-check - salt.lasthighstate + - common + - sensoroni + - telegraf 'not *_desktop and G@saltversion:{{saltversion}}': - match: compound - - common - docker '*_sensor and 
G@saltversion:{{saltversion}}': - match: compound - sensor - ssl - - sensoroni - - telegraf - firewall - nginx - pcap @@ -57,11 +57,9 @@ base: - ca - ssl - registry - - sensoroni - manager - backup.config_backup - nginx - - telegraf - influxdb - soc - kratos @@ -92,9 +90,7 @@ base: - ca - ssl - registry - - sensoroni - nginx - - telegraf - influxdb - soc - kratos @@ -124,11 +120,9 @@ base: - ca - ssl - registry - - sensoroni - manager - backup.config_backup - nginx - - telegraf - influxdb - soc - kratos @@ -157,9 +151,7 @@ base: '*_searchnode and G@saltversion:{{saltversion}}': - match: compound - ssl - - sensoroni - nginx - - telegraf - firewall - elasticsearch - logstash @@ -172,9 +164,7 @@ base: - ca - ssl - registry - - sensoroni - nginx - - telegraf - influxdb - soc - kratos @@ -201,9 +191,7 @@ base: - match: compound - sensor - ssl - - sensoroni - nginx - - telegraf - firewall - elasticsearch - logstash @@ -224,10 +212,8 @@ base: - ca - ssl - registry - - sensoroni - manager - nginx - - telegraf - influxdb - soc - kratos @@ -247,8 +233,6 @@ base: '*_receiver and G@saltversion:{{saltversion}}': - match: compound - ssl - - sensoroni - - telegraf - firewall - logstash - redis @@ -258,8 +242,6 @@ base: '*_idh and G@saltversion:{{saltversion}}': - match: compound - ssl - - sensoroni - - telegraf - firewall - elasticfleet.install_agent_grid - docker_clean @@ -268,8 +250,6 @@ base: '*_fleet and G@saltversion:{{saltversion}}': - match: compound - ssl - - sensoroni - - telegraf - firewall - logstash - elasticfleet From 1871d48f7f1fe38686024804f0cde9c93ff75f27 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 31 Aug 2023 20:42:00 -0400 Subject: [PATCH 057/417] remove unnecesary OTHER submenu --- setup/so-whiptail | 19 +------------------ 1 file changed, 1 insertion(+), 18 deletions(-) diff --git a/setup/so-whiptail b/setup/so-whiptail index 8fd3b5fdd..9622ad44a 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -563,7 +563,7 @@ whiptail_install_type() { 
"EVAL" "Evaluation mode (not for production) " \ "STANDALONE" "Standalone production install " \ "DISTRIBUTED" "Distributed install submenu " \ - "OTHER" "Other install types" \ + "DESKTOP" "Install Security Onion Desktop" \ 3>&1 1>&2 2>&3 ) elif [[ "$OSVER" == "focal" ]]; then @@ -584,8 +584,6 @@ whiptail_install_type() { else whiptail_install_type_dist_existing fi - elif [[ $install_type == "OTHER" ]]; then - whiptail_install_type_other fi export install_type @@ -691,21 +689,6 @@ whiptail_install_type_dist_existing() { whiptail_check_exitstatus $exitstatus } - -whiptail_install_type_other() { - - [ -n "$TESTING" ] && return - - install_type=$(whiptail --title "$whiptail_title" --menu \ - "Choose node type:" 10 65 2 \ - "DESKTOP" "Install Security Onion Desktop " 3>&1 1>&2 2>&3) - - local exitstatus=$? - whiptail_check_exitstatus $exitstatus - - export install_type -} - whiptail_invalid_input() { # TODO: This should accept a list of arguments to specify what general pattern the input should follow [ -n "$TESTING" ] && return From b64fa512688239d06ba8b09bc3261c595f943ee8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 1 Sep 2023 09:16:24 -0400 Subject: [PATCH 058/417] give desktop docker state and pillars --- pillar/top.sls | 4 ++-- salt/top.sls | 5 +---- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/pillar/top.sls b/pillar/top.sls index 9f21a2c99..53ec8a330 100644 --- a/pillar/top.sls +++ b/pillar/top.sls @@ -2,6 +2,8 @@ base: '*': - global.soc_global - global.adv_global + - docker.soc_docker + - docker.adv_docker - influxdb.token - logrotate.soc_logrotate - logrotate.adv_logrotate @@ -16,8 +18,6 @@ base: - telegraf.adv_telegraf '* and not *_desktop': - - docker.soc_docker - - docker.adv_docker - firewall.soc_firewall - firewall.adv_firewall - nginx.soc_nginx diff --git a/salt/top.sls b/salt/top.sls index ccad30307..e278635b5 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -29,13 +29,10 @@ base: - salt.minion-check - salt.lasthighstate - 
common + - docker - sensoroni - telegraf - 'not *_desktop and G@saltversion:{{saltversion}}': - - match: compound - - docker - '*_sensor and G@saltversion:{{saltversion}}': - match: compound - sensor From 0fb00d569e4417f382a211b8b89d3685c5ea46a1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 1 Sep 2023 09:39:39 -0400 Subject: [PATCH 059/417] allow states for desktop. give all nodes docker_clean, order it last --- salt/allowed_states.map.jinja | 2 ++ salt/docker_clean/init.sls | 1 + salt/top.sls | 12 +----------- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index a3c5c75ab..6932e8c84 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -188,6 +188,8 @@ 'docker_clean' ], 'so-desktop': [ + 'docker_clean', + 'telegraf' ], }, grain='role') %} diff --git a/salt/docker_clean/init.sls b/salt/docker_clean/init.sls index c11af4f56..ee60f5591 100644 --- a/salt/docker_clean/init.sls +++ b/salt/docker_clean/init.sls @@ -9,6 +9,7 @@ prune_images: cmd.run: - name: so-docker-prune + - order: last {% else %} diff --git a/salt/top.sls b/salt/top.sls index e278635b5..2409aec82 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -32,6 +32,7 @@ base: - docker - sensoroni - telegraf + - docker_clean '*_sensor and G@saltversion:{{saltversion}}': - match: compound @@ -44,7 +45,6 @@ base: - healthcheck - zeek - strelka - - docker_clean - elasticfleet.install_agent_grid '*_eval and G@saltversion:{{saltversion}}': @@ -79,7 +79,6 @@ base: - playbook - redis - elasticfleet - - docker_clean '*_manager and G@saltversion:{{saltversion}}': - match: compound @@ -108,7 +107,6 @@ base: - soctopus - playbook - elasticfleet - - docker_clean '*_standalone and G@saltversion:{{saltversion}}': - match: compound @@ -143,7 +141,6 @@ base: - soctopus - playbook - elasticfleet - - docker_clean '*_searchnode and G@saltversion:{{saltversion}}': - match: compound @@ -153,7 +150,6 @@ base: - 
elasticsearch - logstash - elasticfleet.install_agent_grid - - docker_clean '*_managersearch and G@saltversion:{{saltversion}}': - match: compound @@ -182,7 +178,6 @@ base: - soctopus - playbook - elasticfleet - - docker_clean '*_heavynode and G@saltversion:{{saltversion}}': - match: compound @@ -200,7 +195,6 @@ base: - zeek - elasticfleet.install_agent_grid - elasticagent - - docker_clean '*_import and G@saltversion:{{saltversion}}': - match: compound @@ -225,7 +219,6 @@ base: - suricata - zeek - elasticfleet - - docker_clean '*_receiver and G@saltversion:{{saltversion}}': - match: compound @@ -234,14 +227,12 @@ base: - logstash - redis - elasticfleet.install_agent_grid - - docker_clean '*_idh and G@saltversion:{{saltversion}}': - match: compound - ssl - firewall - elasticfleet.install_agent_grid - - docker_clean - idh '*_fleet and G@saltversion:{{saltversion}}': @@ -252,7 +243,6 @@ base: - elasticfleet - elasticfleet.install_agent_grid - schedule - - docker_clean 'J@desktop:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:OEL )': - match: compound From b64d4e36584bca8ea60c7f7f4d9a99407cb52251 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 1 Sep 2023 09:53:26 -0400 Subject: [PATCH 060/417] add telegraf pillar to desktop --- salt/manager/tools/sbin/so-minion | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index de55c3a5b..075632985 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -552,6 +552,7 @@ function createRECEIVER() { function createDESKTOP() { add_desktop_to_minion + add_telegraf_to_minion } function testConnection() { From 546c562ef0ad9779c41adacc03378e515d826c50 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 1 Sep 2023 10:31:02 -0400 Subject: [PATCH 061/417] expose standard relay timeout in config UI; up default to 45s to accommodate sluggish pillar.get calls --- salt/soc/defaults.yaml | 1 + 
salt/soc/soc_soc.yaml | 4 ++++ 2 files changed, 5 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 05543cd19..6d8ed5bfd 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1012,6 +1012,7 @@ soc: verifyCert: false salt: queueDir: /opt/sensoroni/queue + timeoutMs: 45000 longRelayTimeoutMs: 120000 sostatus: refreshIntervalMs: 30000 diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index e94144069..291f564ed 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -116,6 +116,10 @@ soc: description: Duration (in milliseconds) to wait for a response from the Salt API when executing tasks known for being long running before giving up and showing an error on the SOC UI. global: True advanced: True + relayTimeoutMs: + description: Duration (in milliseconds) to wait for a response from the Salt API when executing common grid management tasks before giving up and showing an error on the SOC UI. + global: True + advanced: True client: enableReverseLookup: description: Set to true to enable reverse DNS lookups for IP addresses in the SOC UI. From 765a22e6f0c435e4d9b36d957623b5bcf8b2cd30 Mon Sep 17 00:00:00 2001 From: weslambert Date: Fri, 1 Sep 2023 11:31:23 -0400 Subject: [PATCH 062/417] Add so-elastic-agent --- salt/firewall/containers.map.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/firewall/containers.map.jinja b/salt/firewall/containers.map.jinja index 617b4a216..02e8a4644 100644 --- a/salt/firewall/containers.map.jinja +++ b/salt/firewall/containers.map.jinja @@ -58,6 +58,7 @@ {% set NODE_CONTAINERS = [ 'so-curator', 'so-elasticsearch', + 'so-elastic-agent', 'so-logstash', 'so-nginx', 'so-redis', From 3434d0f200ea1a79d4ad5c58c33fa3b04cb7093b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 1 Sep 2023 12:02:30 -0400 Subject: [PATCH 063/417] add sensoroni and telegraf back to individual nodes. 
add seperate block for desktop --- salt/top.sls | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/salt/top.sls b/salt/top.sls index 2409aec82..b8ca0f14e 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -30,14 +30,14 @@ base: - salt.lasthighstate - common - docker - - sensoroni - - telegraf - docker_clean '*_sensor and G@saltversion:{{saltversion}}': - match: compound - sensor - ssl + - sensoroni + - telegraf - firewall - nginx - pcap @@ -60,6 +60,8 @@ base: - influxdb - soc - kratos + - sensoroni + - telegraf - firewall - idstools - suricata.manager @@ -92,6 +94,8 @@ base: - kratos - firewall - manager + - sensoroni + - telegraf - backup.config_backup - idstools - suricata.manager @@ -122,6 +126,8 @@ base: - soc - kratos - firewall + - sensoroni + - telegraf - idstools - suricata.manager - healthcheck @@ -145,6 +151,8 @@ base: '*_searchnode and G@saltversion:{{saltversion}}': - match: compound - ssl + - sensoroni + - telegraf - nginx - firewall - elasticsearch @@ -163,6 +171,8 @@ base: - kratos - firewall - manager + - sensoroni + - telegraf - backup.config_backup - idstools - suricata.manager @@ -183,6 +193,8 @@ base: - match: compound - sensor - ssl + - sensoroni + - telegraf - nginx - firewall - elasticsearch @@ -208,6 +220,8 @@ base: - influxdb - soc - kratos + - sensoroni + - telegraf - firewall - idstools - suricata.manager @@ -223,6 +237,8 @@ base: '*_receiver and G@saltversion:{{saltversion}}': - match: compound - ssl + - sensoroni + - telegraf - firewall - logstash - redis @@ -231,6 +247,8 @@ base: '*_idh and G@saltversion:{{saltversion}}': - match: compound - ssl + - sensoroni + - telegraf - firewall - elasticfleet.install_agent_grid - idh @@ -238,12 +256,19 @@ base: '*_fleet and G@saltversion:{{saltversion}}': - match: compound - ssl + - sensoroni + - telegraf - firewall - logstash - elasticfleet - elasticfleet.install_agent_grid - schedule + '*_desktop and G@saltversion:{{saltversion}}': + - ssl + - 
sensoroni + - telegraf + 'J@desktop:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:OEL )': - match: compound - desktop From 490669d3782fbc037a4a5474c2eafcd34bf89c8d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 1 Sep 2023 12:03:01 -0400 Subject: [PATCH 064/417] add ssl to desktop for allowed_states --- salt/allowed_states.map.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/allowed_states.map.jinja b/salt/allowed_states.map.jinja index 6932e8c84..4e3e57f9c 100644 --- a/salt/allowed_states.map.jinja +++ b/salt/allowed_states.map.jinja @@ -188,6 +188,7 @@ 'docker_clean' ], 'so-desktop': [ + 'ssl', 'docker_clean', 'telegraf' ], From aebfb19ab77c409210935bfbdcdecce31a0b7c37 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 1 Sep 2023 12:05:28 -0400 Subject: [PATCH 065/417] add sostatus.sh to desktop for telegraf scripts --- salt/telegraf/defaults.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/telegraf/defaults.yaml b/salt/telegraf/defaults.yaml index 36ef679f0..a87fa952b 100644 --- a/salt/telegraf/defaults.yaml +++ b/salt/telegraf/defaults.yaml @@ -87,4 +87,5 @@ telegraf: - sostatus.sh fleet: - sostatus.sh - desktop: [] + desktop: + - sostatus.sh From 585fba4bc69a244aafe3717c87eb80b498e05347 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 1 Sep 2023 12:40:01 -0400 Subject: [PATCH 066/417] add functions salt_install_module_deps and salt_patch_x509_v2 --- setup/so-functions | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 4f973d147..0300e8d21 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -94,6 +94,9 @@ desktop_salt_local() { logCmd "yum -y install salt-minion-$SALTVERSION httpd-tools python3 python3-dateutil yum-utils device-mapper-persistent-data lvm2 openssl jq" logCmd "yum -y update --exclude=salt*" + salt_install_module_deps + salt_patch_x509_v2 + logCmd "salt-call state.apply 
desktop --local --file-root=../salt/ -l info" read -r -d '' message <<- EOM Finished Security Onion Desktop installation. @@ -2073,21 +2076,27 @@ saltify() { fi logCmd "mkdir -p /etc/salt/minion.d" + salt_install_module_deps + salt_patch_x509_v2 + +} + +# Run a salt command to generate the minion key +salt_firstcheckin() { + salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the ouput +} + +salt_install_module_deps() { logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/" logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/" +} +salt_patch_x509_v2() { # this can be removed when https://github.com/saltstack/salt/issues/64195 is resolved if [ $SALTVERSION == "3006.1" ]; then info "Salt version 3006.1 found. Patching /opt/saltstack/salt/lib/python3.10/site-packages/salt/states/x509_v2.py" \cp -v ./files/patch/states/x509_v2.py /opt/saltstack/salt/lib/python3.10/site-packages/salt/states/x509_v2.py fi - -} - - -# Run a salt command to generate the minion key -salt_firstcheckin() { - salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the ouput } # Create an secrets pillar so that passwords survive re-install From 8093e5ce7c44612225a27515aef4fed0dfbac468 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 1 Sep 2023 13:01:17 -0400 Subject: [PATCH 067/417] use IP to avoid host issues --- salt/common/tools/sbin/so-test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-test b/salt/common/tools/sbin/so-test index 7286a35a8..1758a44bb 100755 --- a/salt/common/tools/sbin/so-test +++ b/salt/common/tools/sbin/so-test @@ -11,4 +11,4 @@ set -e so-tcpreplay /opt/samples/* 2> /dev/null # Ingest sample pfsense log entry -echo "<134>$(date '+%b %d %H:%M:%S') filterlog[31624]: 
84,,,1567509287,igb0.244,match,pass,in,4,0x0,,64,0,0,DF,6,tcp,64,192.168.1.1,10.10.10.10,56320,443,0,S,3333585167,,65535,,mss;nop;wscale;nop;nop;TS;sackOK;eol" | nc -uv -w1 localhost 514 > /dev/null 2>&1 +echo "<134>$(date '+%b %d %H:%M:%S') filterlog[31624]: 84,,,1567509287,igb0.244,match,pass,in,4,0x0,,64,0,0,DF,6,tcp,64,192.168.1.1,10.10.10.10,56320,443,0,S,3333585167,,65535,,mss;nop;wscale;nop;nop;TS;sackOK;eol" | nc -uv -w1 127.0.0.1 514 > /dev/null 2>&1 From 07ed93de19a011ddd18ccae0aa3f93ffe1fd5bf0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 1 Sep 2023 14:33:32 -0400 Subject: [PATCH 068/417] add elastic agent to desktop --- salt/firewall/defaults.yaml | 17 ++++++++++++++--- salt/top.sls | 1 + 2 files changed, 15 insertions(+), 3 deletions(-) diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml index 578a242f9..75df49b25 100644 --- a/salt/firewall/defaults.yaml +++ b/salt/firewall/defaults.yaml @@ -289,6 +289,11 @@ firewall: - elastic_agent_control - elastic_agent_data - elastic_agent_update + desktop: + portgroups: + - elastic_agent_control + - elastic_agent_data + - elastic_agent_update customhostgroup0: portgroups: [] customhostgroup1: @@ -467,6 +472,9 @@ firewall: - influxdb - sensoroni - yum + - elastic_agent_control + - elastic_agent_data + - elastic_agent_update customhostgroup0: portgroups: [] customhostgroup1: @@ -658,6 +666,9 @@ firewall: - influxdb - sensoroni - yum + - elastic_agent_control + - elastic_agent_data + - elastic_agent_update customhostgroup0: portgroups: [] customhostgroup1: @@ -857,6 +868,9 @@ firewall: - influxdb - sensoroni - yum + - elastic_agent_control + - elastic_agent_data + - elastic_agent_update customhostgroup0: portgroups: [] customhostgroup1: @@ -1214,9 +1228,6 @@ firewall: analyst: portgroups: - nginx - desktop: - portgroups: - - yum customhostgroup0: portgroups: [] customhostgroup1: diff --git a/salt/top.sls b/salt/top.sls index b8ca0f14e..6db19b361 100644 --- a/salt/top.sls +++ b/salt/top.sls 
@@ -268,6 +268,7 @@ base: - ssl - sensoroni - telegraf + - elasticfleet.install_agent_grid 'J@desktop:gui:enabled:^[Tt][Rr][Uu][Ee]$ and ( G@saltversion:{{saltversion}} and G@os:OEL )': - match: compound From 335aaa55944ae951153c07b59d3ea2e53aa6c6be Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 1 Sep 2023 15:30:53 -0400 Subject: [PATCH 069/417] add additional test modes --- setup/so-setup | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index a9c7776c3..99a7c672e 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -183,9 +183,26 @@ if [ -n "$test_profile" ]; then install_type=SEARCHNODE HOSTNAME=search MSRVIP_OFFSET=-1 - else + elif [[ "$test_profile" =~ "-managersearch" ]]; then + install_type=MANAGERSEARCH + elif [[ "$test_profile" =~ "-heavynode" ]]; then + install_type=HEAVYNODE + HOSTNAME=sensor + MSRVIP_OFFSET=-1 + elif [[ "$test_profile" =~ "-desktop" ]]; then + install_type=DESKTOP + MSRVIP_OFFSET=-3 + is_desktop_grid=true + fi + + if [[ -z "$HOSTNAME" ]]; then HOSTNAME=manager fi + + if [[ "$install_type" =~ "DESKTOP" ]]; then + is_desktop=true + hostname=desktop + fi info "Activating test profile; profile=$test_profile; install_type=$install_type" From 863db14b61b4d691ce38f7d28c2a47dc4fcf9b7b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 1 Sep 2023 16:27:02 -0400 Subject: [PATCH 070/417] add additional test modes --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 99a7c672e..ce13af06b 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -201,7 +201,7 @@ if [ -n "$test_profile" ]; then if [[ "$install_type" =~ "DESKTOP" ]]; then is_desktop=true - hostname=desktop + HOSTNAME=desktop fi info "Activating test profile; profile=$test_profile; install_type=$install_type" From a11259c6832ce3331f55e62e704caaf6a6624cdb Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 1 Sep 2023 17:08:27 -0400 Subject: [PATCH 
071/417] add additional test modes --- setup/so-setup | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-setup b/setup/so-setup index ce13af06b..c6ff27198 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -174,6 +174,7 @@ if [ -n "$test_profile" ]; then # The below settings are hardcoded purely for automated testing purposes. TESTING=true + is_desktop_grid=false if [[ "$test_profile" =~ "-sensor" ]]; then install_type=SENSOR From 6efdf1b9d0444ddb9f39127589c7cbba66d4af83 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 1 Sep 2023 17:24:12 -0400 Subject: [PATCH 072/417] add additional test modes --- setup/so-functions | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 4f973d147..efa6c800f 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -101,8 +101,10 @@ desktop_salt_local() { Press the Enter key to reboot. EOM - whiptail --title "$whiptail_title" --msgbox "$message" 12 75 - reboot + if [[ -z "$TESTING" ]]; then + whiptail --title "$whiptail_title" --msgbox "$message" 12 75 + reboot + fi exit 0 } From 0aae107155a18b89f49a8f5048ade3af5a49e4d4 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 1 Sep 2023 20:30:53 -0400 Subject: [PATCH 073/417] ensure hostname is set --- setup/so-setup | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index c6ff27198..8537aa7c3 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -194,9 +194,7 @@ if [ -n "$test_profile" ]; then install_type=DESKTOP MSRVIP_OFFSET=-3 is_desktop_grid=true - fi - - if [[ -z "$HOSTNAME" ]]; then + else HOSTNAME=manager fi From 0cf913a7c145c8213fa39176409a77291150f66b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sat, 2 Sep 2023 06:05:37 -0400 Subject: [PATCH 074/417] ensure hostname is set --- setup/so-setup | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 8537aa7c3..7c419fae2 100755 --- a/setup/so-setup 
+++ b/setup/so-setup @@ -174,7 +174,6 @@ if [ -n "$test_profile" ]; then # The below settings are hardcoded purely for automated testing purposes. TESTING=true - is_desktop_grid=false if [[ "$test_profile" =~ "-sensor" ]]; then install_type=SENSOR @@ -201,6 +200,9 @@ if [ -n "$test_profile" ]; then if [[ "$install_type" =~ "DESKTOP" ]]; then is_desktop=true HOSTNAME=desktop + if [[ -z "$is_desktop_grid" ]]; then + is_desktop_grid=false + fi fi info "Activating test profile; profile=$test_profile; install_type=$install_type" From 8e2bed7f91c93542777114325dfecdad7546b3ac Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sun, 3 Sep 2023 19:56:40 -0400 Subject: [PATCH 075/417] MS testing --- setup/so-setup | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-setup b/setup/so-setup index 7c419fae2..c1d92ec62 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -185,6 +185,7 @@ if [ -n "$test_profile" ]; then MSRVIP_OFFSET=-1 elif [[ "$test_profile" =~ "-managersearch" ]]; then install_type=MANAGERSEARCH + HOSTNAME=manager elif [[ "$test_profile" =~ "-heavynode" ]]; then install_type=HEAVYNODE HOSTNAME=sensor From cf19c8f8c2fa88adf5deb26ee1c466e332622f0e Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 5 Sep 2023 13:43:41 +0000 Subject: [PATCH 076/417] Remove templates --- .../logs-elastic_agent.apm_server@custom.json | 12 - ...logs-elastic_agent.apm_server@package.json | 329 --- .../logs-elastic_agent.auditbeat@custom.json | 12 - .../logs-elastic_agent.auditbeat@package.json | 329 --- .../logs-elastic_agent.cloudbeat@custom.json | 12 - .../logs-elastic_agent.cloudbeat@package.json | 339 --- ...lastic_agent.endpoint_security@custom.json | 12 - ...astic_agent.endpoint_security@package.json | 329 --- .../logs-elastic_agent.filebeat@custom.json | 12 - .../logs-elastic_agent.filebeat@package.json | 329 --- ...ogs-elastic_agent.fleet_server@custom.json | 12 - ...gs-elastic_agent.fleet_server@package.json | 329 --- .../logs-elastic_agent.heartbeat@custom.json | 12 - 
.../logs-elastic_agent.heartbeat@package.json | 329 --- .../logs-elastic_agent.metricbeat@custom.json | 12 - ...logs-elastic_agent.metricbeat@package.json | 329 --- ...logs-elastic_agent.osquerybeat@custom.json | 12 - ...ogs-elastic_agent.osquerybeat@package.json | 329 --- .../logs-elastic_agent.packetbeat@custom.json | 12 - ...logs-elastic_agent.packetbeat@package.json | 322 --- .../logs-system.application@custom.json | 12 - .../logs-system.application@package.json | 952 ------ .../logs-system.auth@custom.json | 12 - .../logs-system.auth@package.json | 530 ---- .../logs-system.security@custom.json | 12 - .../logs-system.security@package.json | 1840 ------------ .../logs-system.syslog@custom.json | 12 - .../logs-system.syslog@package.json | 327 --- .../logs-system.system@custom.json | 12 - .../logs-system.system@package.json | 986 ------- .../logs-windows.forwarded@custom.json | 12 - .../logs-windows.forwarded@package.json | 2544 ----------------- .../logs-windows.powershell@custom.json | 12 - .../logs-windows.powershell@package.json | 1335 --------- ...windows.powershell_operational@custom.json | 12 - ...indows.powershell_operational@package.json | 1334 --------- ...ogs-windows.sysmon_operational@custom.json | 12 - ...gs-windows.sysmon_operational@package.json | 1752 ------------ 38 files changed, 15121 deletions(-) delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.apm_server@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.apm_server@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.auditbeat@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.auditbeat@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.cloudbeat@custom.json delete mode 100644 
salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.cloudbeat@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.endpoint_security@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.endpoint_security@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.filebeat@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.filebeat@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.fleet_server@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.fleet_server@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.heartbeat@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.heartbeat@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.metricbeat@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.metricbeat@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.osquerybeat@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.osquerybeat@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.packetbeat@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.packetbeat@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.application@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.application@package.json delete mode 100644 
salt/elasticsearch/templates/component/elastic-agent/logs-system.auth@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.auth@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.security@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.security@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.system@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-system.system@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-windows.forwarded@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-windows.forwarded@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell_operational@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell_operational@package.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-windows.sysmon_operational@custom.json delete mode 100644 salt/elasticsearch/templates/component/elastic-agent/logs-windows.sysmon_operational@package.json diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.apm_server@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.apm_server@custom.json deleted file mode 100644 index fe77af1db..000000000 --- 
a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.apm_server@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.apm_server@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.apm_server@package.json deleted file mode 100644 index 919763caa..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.apm_server@package.json +++ /dev/null @@ -1,329 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.apm_server-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": 
{ - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - }, - "message": { - "type": "text" - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.auditbeat@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.auditbeat@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.auditbeat@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - 
"managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.auditbeat@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.auditbeat@package.json deleted file mode 100644 index 175ad4431..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.auditbeat@package.json +++ /dev/null @@ -1,329 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.auditbeat-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, 
- "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 
1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - }, - "message": { - "type": "text" - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.cloudbeat@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.cloudbeat@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.cloudbeat@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.cloudbeat@package.json 
b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.cloudbeat@package.json deleted file mode 100644 index a96480471..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.cloudbeat@package.json +++ /dev/null @@ -1,339 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.cloudbeat-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "decision_id", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "message": { - "type": "match_only_text" - }, - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "result": { - "type": "object" - }, - "input": { - "type": "object" - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "decision_id": { - "type": "text" - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - 
"hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.endpoint_security@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.endpoint_security@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.endpoint_security@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.endpoint_security@package.json 
b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.endpoint_security@package.json deleted file mode 100644 index 5f16d18de..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.endpoint_security@package.json +++ /dev/null @@ -1,329 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.endpoint_security-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, 
- "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - }, - "message": { - "type": "text" - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.filebeat@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.filebeat@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.filebeat@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.filebeat@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.filebeat@package.json deleted file mode 100644 index f5b1ab12a..000000000 --- 
a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.filebeat@package.json +++ /dev/null @@ -1,329 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.filebeat-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": 
"keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - }, - "message": { - "type": "text" - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.fleet_server@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.fleet_server@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.fleet_server@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.fleet_server@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.fleet_server@package.json deleted file mode 100644 index a61d9f7a9..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.fleet_server@package.json +++ /dev/null @@ -1,329 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - 
"codec": "best_compression", - "default_pipeline": "logs-elastic_agent.fleet_server-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { 
- "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - }, - "message": { - "type": "text" - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.heartbeat@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.heartbeat@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.heartbeat@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.heartbeat@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.heartbeat@package.json deleted file mode 100644 index d7e244dc2..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.heartbeat@package.json +++ /dev/null @@ -1,329 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.heartbeat-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - 
"cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } 
- }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - 
"properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "message": { - "type": "text" - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.metricbeat@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.metricbeat@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.metricbeat@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.metricbeat@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.metricbeat@package.json deleted file mode 100644 index 7b0c81283..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.metricbeat@package.json +++ /dev/null @@ -1,329 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.metricbeat-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - 
"host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": 
"boolean" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - }, - "message": { - "type": "text" - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.osquerybeat@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.osquerybeat@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.osquerybeat@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.osquerybeat@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.osquerybeat@package.json deleted file mode 100644 index 2a6780e69..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.osquerybeat@package.json +++ /dev/null @@ -1,329 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.osquerybeat-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - 
"ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - ] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } 
- }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "event": { - "properties": { - "dataset": { - "type": "constant_keyword" - } - } - }, - "message": { - "type": "text" - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - 
"managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.packetbeat@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.packetbeat@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.packetbeat@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.packetbeat@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.packetbeat@package.json deleted file mode 100644 index 973427be1..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent.packetbeat@package.json +++ /dev/null @@ -1,322 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-elastic_agent.packetbeat-1.7.0", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "ecs.version", - "agent.build.original", - "agent.ephemeral_id", - "agent.id", - "agent.name", - "agent.type", - "agent.version", - "log.level", - "message", - "elastic_agent.id", - "elastic_agent.process", - "elastic_agent.version" - 
] - } - } - }, - "mappings": { - "dynamic": false, - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "agent": { - "properties": { - "build": { - "properties": { - "original": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "ephemeral_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } 
- } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "elastic_agent": { - "properties": { - "process": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "snapshot": { - "type": "boolean" - } - } - }, - "message": { - "type": "text" - } - } - } - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.application@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.application@custom.json deleted file mode 100644 index fe77af1db..000000000 --- 
a/salt/elasticsearch/templates/component/elastic-agent/logs-system.application@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.application@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.application@package.json deleted file mode 100644 index 05741a4f0..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.application@package.json +++ /dev/null @@ -1,952 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-system.application-1.6.4", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "event.code", - "event.original", - "error.message", - "message", - "winlog.api", - "winlog.activity_id", - "winlog.computer_name", - "winlog.event_data.AuthenticationPackageName", - "winlog.event_data.Binary", - "winlog.event_data.BitlockerUserInputTime", - "winlog.event_data.BootMode", - "winlog.event_data.BootType", - "winlog.event_data.BuildVersion", - "winlog.event_data.Company", - "winlog.event_data.CorruptionActionState", - "winlog.event_data.CreationUtcTime", - "winlog.event_data.Description", - "winlog.event_data.Detail", - "winlog.event_data.DeviceName", - 
"winlog.event_data.DeviceNameLength", - "winlog.event_data.DeviceTime", - "winlog.event_data.DeviceVersionMajor", - "winlog.event_data.DeviceVersionMinor", - "winlog.event_data.DriveName", - "winlog.event_data.DriverName", - "winlog.event_data.DriverNameLength", - "winlog.event_data.DwordVal", - "winlog.event_data.EntryCount", - "winlog.event_data.ExtraInfo", - "winlog.event_data.FailureName", - "winlog.event_data.FailureNameLength", - "winlog.event_data.FileVersion", - "winlog.event_data.FinalStatus", - "winlog.event_data.Group", - "winlog.event_data.IdleImplementation", - "winlog.event_data.IdleStateCount", - "winlog.event_data.ImpersonationLevel", - "winlog.event_data.IntegrityLevel", - "winlog.event_data.IpAddress", - "winlog.event_data.IpPort", - "winlog.event_data.KeyLength", - "winlog.event_data.LastBootGood", - "winlog.event_data.LastShutdownGood", - "winlog.event_data.LmPackageName", - "winlog.event_data.LogonGuid", - "winlog.event_data.LogonId", - "winlog.event_data.LogonProcessName", - "winlog.event_data.LogonType", - "winlog.event_data.MajorVersion", - "winlog.event_data.MaximumPerformancePercent", - "winlog.event_data.MemberName", - "winlog.event_data.MemberSid", - "winlog.event_data.MinimumPerformancePercent", - "winlog.event_data.MinimumThrottlePercent", - "winlog.event_data.MinorVersion", - "winlog.event_data.NewProcessId", - "winlog.event_data.NewProcessName", - "winlog.event_data.NewSchemeGuid", - "winlog.event_data.NewTime", - "winlog.event_data.NominalFrequency", - "winlog.event_data.Number", - "winlog.event_data.OldSchemeGuid", - "winlog.event_data.OldTime", - "winlog.event_data.OriginalFileName", - "winlog.event_data.Path", - "winlog.event_data.PerformanceImplementation", - "winlog.event_data.PreviousCreationUtcTime", - "winlog.event_data.PreviousTime", - "winlog.event_data.PrivilegeList", - "winlog.event_data.ProcessId", - "winlog.event_data.ProcessName", - "winlog.event_data.ProcessPath", - "winlog.event_data.ProcessPid", - 
"winlog.event_data.Product", - "winlog.event_data.PuaCount", - "winlog.event_data.PuaPolicyId", - "winlog.event_data.QfeVersion", - "winlog.event_data.Reason", - "winlog.event_data.SchemaVersion", - "winlog.event_data.ScriptBlockText", - "winlog.event_data.ServiceName", - "winlog.event_data.ServiceVersion", - "winlog.event_data.ShutdownActionType", - "winlog.event_data.ShutdownEventCode", - "winlog.event_data.ShutdownReason", - "winlog.event_data.Signature", - "winlog.event_data.SignatureStatus", - "winlog.event_data.Signed", - "winlog.event_data.StartTime", - "winlog.event_data.State", - "winlog.event_data.Status", - "winlog.event_data.StopTime", - "winlog.event_data.SubjectDomainName", - "winlog.event_data.SubjectLogonId", - "winlog.event_data.SubjectUserName", - "winlog.event_data.SubjectUserSid", - "winlog.event_data.TSId", - "winlog.event_data.TargetDomainName", - "winlog.event_data.TargetInfo", - "winlog.event_data.TargetLogonGuid", - "winlog.event_data.TargetLogonId", - "winlog.event_data.TargetServerName", - "winlog.event_data.TargetUserName", - "winlog.event_data.TargetUserSid", - "winlog.event_data.TerminalSessionId", - "winlog.event_data.TokenElevationType", - "winlog.event_data.TransmittedServices", - "winlog.event_data.UserSid", - "winlog.event_data.Version", - "winlog.event_data.Workstation", - "winlog.event_data.param1", - "winlog.event_data.param2", - "winlog.event_data.param3", - "winlog.event_data.param4", - "winlog.event_data.param5", - "winlog.event_data.param6", - "winlog.event_data.param7", - "winlog.event_data.param8", - "winlog.event_id", - "winlog.keywords", - "winlog.channel", - "winlog.record_id", - "winlog.related_activity_id", - "winlog.opcode", - "winlog.provider_guid", - "winlog.provider_name", - "winlog.task", - "winlog.user.identifier", - "winlog.user.name", - "winlog.user.domain", - "winlog.user.type" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - 
"mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - }, - { - "winlog.user_data": { - "path_match": "winlog.user_data.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "winlog": { - "properties": { - "related_activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "computer_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "process": { - "properties": { - "pid": { - "type": "long" - }, - "thread": { - "properties": { - "id": { - "type": "long" - } - } - } - } - }, - "keywords": { - "ignore_above": 1024, - "type": "keyword" - }, - "channel": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_data": { - "properties": { - "SignatureStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceTime": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"ProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "OriginalFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "Product": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "FileVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "StopTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Status": { - "ignore_above": 1024, - "type": "keyword" - }, - "CorruptionActionState": { - "ignore_above": 1024, - "type": "keyword" - }, - "KeyLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousCreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "PerformanceImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Group": { - "ignore_above": 1024, - "type": "keyword" - }, - "Description": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownActionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "DwordVal": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMajor": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptBlockText": { - "ignore_above": 1024, - "type": "keyword" - }, - "TransmittedServices": { - "ignore_above": 1024, - "type": "keyword" - }, - "MaximumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "FinalStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleStateCount": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "MajorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "Path": { - "ignore_above": 1024, - "type": "keyword" - }, - "SchemaVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "TokenElevationType": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "QfeVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMinor": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Company": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaPolicyId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IntegrityLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastShutdownGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpPort": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "LmPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastBootGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Version": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signed": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownEventCode": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureNameLength": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "ServiceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "State": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Binary": { - "ignore_above": 1024, - "type": "keyword" - }, - "ImpersonationLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Detail": { - "ignore_above": 1024, - "type": "keyword" - }, - "TerminalSessionId": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "CreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetServerName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Number": { - "ignore_above": 1024, - "type": "keyword" - }, - "BuildVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TSId": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PrivilegeList": { - "ignore_above": 1024, - "type": "keyword" - }, - "param7": { - "ignore_above": 1024, - "type": "keyword" - }, - "param8": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "param5": { - "ignore_above": 1024, - "type": "keyword" - }, - "param6": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriveName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ExtraInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "param3": { - "ignore_above": 1024, - "type": "keyword" - }, - "param4": { - "ignore_above": 1024, - "type": "keyword" - }, - "param1": { - "ignore_above": 1024, - "type": "keyword" - }, - "param2": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "Workstation": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signature": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumThrottlePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "EntryCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "BitlockerUserInputTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuthenticationPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NominalFrequency": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "opcode": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "type": "long" - }, - "record_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "task": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_guid": { - "ignore_above": 1024, - "type": "keyword" - }, - "activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "api": 
{ - "ignore_above": 1024, - "type": "keyword" - }, - "provider_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "user": { - "properties": { - "identifier": { - "ignore_above": 1024, - "type": "keyword" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "ingested": { - "type": "date" - }, - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "original": { - "ignore_above": 1024, - "type": "keyword" - }, - "created": { - "type": "date" - }, - "module": { - "type": "constant_keyword", - "value": "system" - }, - 
"dataset": { - "type": "constant_keyword", - "value": "system.application" - } - } - }, - "error": { - "properties": { - "message": { - "type": "match_only_text" - } - } - }, - "message": { - "type": "match_only_text" - } - } - } - }, - "_meta": { - "package": { - "name": "system" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.auth@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.auth@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.auth@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.auth@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.auth@package.json deleted file mode 100644 index 51e707850..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.auth@package.json +++ /dev/null @@ -1,530 +0,0 @@ -{ - "template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-system.auth-1.6.4", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.os.full", - "host.type", - "event.action", 
- "event.category", - "event.code", - "event.kind", - "event.outcome", - "event.provider", - "event.type", - "ecs.version", - "error.message", - "group.id", - "group.name", - "message", - "process.name", - "related.hosts", - "related.user", - "source.as.organization.name", - "source.geo.city_name", - "source.geo.continent_name", - "source.geo.country_iso_code", - "source.geo.country_name", - "source.geo.region_iso_code", - "source.geo.region_name", - "user.effective.name", - "user.id", - "user.name", - "system.auth.ssh.method", - "system.auth.ssh.signature", - "system.auth.ssh.event", - "system.auth.sudo.error", - "system.auth.sudo.tty", - "system.auth.sudo.pwd", - "system.auth.sudo.user", - "system.auth.sudo.command", - "system.auth.useradd.home", - "system.auth.useradd.shell", - "version" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "pid": { - "type": "long" - } - } - }, - "source": { - "properties": { - "geo": { - "properties": { - "continent_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "region_iso_code": { - "ignore_above": 1024, - "type": "keyword" - }, - "city_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "country_iso_code": { - "ignore_above": 1024, - "type": "keyword" - }, - "country_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "location": { - "type": "geo_point" - }, - "region_name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "as": { - "properties": { - "number": 
{ - "type": "long" - }, - "organization": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "port": { - "type": "long" - }, - "ip": { - "type": "ip" - } - } - }, - "error": { - "properties": { - "message": { - "type": "match_only_text" - } - } - }, - "message": { - "type": "match_only_text" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "@timestamp": { - "type": "date" - }, - "system": { - "properties": { - "auth": { - "properties": { - "ssh": { - "properties": { - "method": { - "ignore_above": 1024, - "type": "keyword" - }, - "dropped_ip": { - "type": "ip" - }, - "signature": { - "ignore_above": 1024, - "type": "keyword" - }, - "event": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "sudo": { - "properties": { - "tty": { - "ignore_above": 1024, - "type": "keyword" - }, - "error": { - "ignore_above": 1024, - "type": "keyword" - }, - "pwd": { - "ignore_above": 1024, - "type": "keyword" - }, - "user": { - "ignore_above": 1024, - "type": "keyword" - }, - "command": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "useradd": { - "properties": { - "shell": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "home": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - } - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "related": { - "properties": { - "hosts": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "user": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword", - "value": "logs" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - }, - "full": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "sequence": { - "type": "long" - }, - "ingested": { - "type": "date" - }, - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"created": { - "type": "date" - }, - "kind": { - "ignore_above": 1024, - "type": "keyword" - }, - "module": { - "type": "constant_keyword", - "value": "system" - }, - "action": { - "ignore_above": 1024, - "type": "keyword" - }, - "category": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "dataset": { - "type": "constant_keyword", - "value": "system.auth" - }, - "outcome": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "user": { - "properties": { - "effective": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "group": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - }, - "_meta": { - "package": { - "name": "system" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.security@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.security@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.security@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.security@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.security@package.json deleted file mode 100644 index a74cd4a70..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.security@package.json +++ /dev/null @@ -1,1840 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, 
- "codec": "best_compression", - "default_pipeline": "logs-system.security-1.6.4", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "event.action", - "event.category", - "event.code", - "event.kind", - "event.outcome", - "event.provider", - "event.type", - "tags", - "input.type", - "ecs.version", - "group.domain", - "group.id", - "group.name", - "log.file.path", - "log.level", - "message", - "process.args", - "process.command_line", - "process.entity_id", - "process.executable", - "process.name", - "process.parent.executable", - "process.parent.name", - "process.title", - "related.hash", - "related.hosts", - "related.user", - "service.name", - "service.type", - "source.domain", - "user.domain", - "user.id", - "user.name", - "user.effective.domain", - "user.effective.id", - "user.effective.name", - "user.target.group.domain", - "user.target.group.id", - "user.target.group.name", - "user.target.name", - "user.target.domain", - "user.target.id", - "user.changes.name", - "winlog.logon.type", - "winlog.logon.id", - "winlog.logon.failure.reason", - "winlog.logon.failure.status", - "winlog.logon.failure.sub_status", - "winlog.api", - "winlog.activity_id", - "winlog.channel", - "winlog.computer_name", - "winlog.computerObject.domain", - "winlog.computerObject.id", - "winlog.computerObject.name", - "winlog.event_data.AccessGranted", - "winlog.event_data.AccessList", - "winlog.event_data.AccessListDescription", - 
"winlog.event_data.AccessMask", - "winlog.event_data.AccessMaskDescription", - "winlog.event_data.AccessRemoved", - "winlog.event_data.AccountDomain", - "winlog.event_data.AccountExpires", - "winlog.event_data.AccountName", - "winlog.event_data.AllowedToDelegateTo", - "winlog.event_data.AuditPolicyChanges", - "winlog.event_data.AuditPolicyChangesDescription", - "winlog.event_data.AuditSourceName", - "winlog.event_data.AuthenticationPackageName", - "winlog.event_data.Binary", - "winlog.event_data.BitlockerUserInputTime", - "winlog.event_data.BootMode", - "winlog.event_data.BootType", - "winlog.event_data.BuildVersion", - "winlog.event_data.CallerProcessId", - "winlog.event_data.CallerProcessName", - "winlog.event_data.Category", - "winlog.event_data.CategoryId", - "winlog.event_data.ClientAddress", - "winlog.event_data.ClientName", - "winlog.event_data.CommandLine", - "winlog.event_data.Company", - "winlog.event_data.CorruptionActionState", - "winlog.event_data.CrashOnAuditFailValue", - "winlog.event_data.CreationUtcTime", - "winlog.event_data.Description", - "winlog.event_data.Detail", - "winlog.event_data.DeviceName", - "winlog.event_data.DeviceNameLength", - "winlog.event_data.DeviceTime", - "winlog.event_data.DeviceVersionMajor", - "winlog.event_data.DeviceVersionMinor", - "winlog.event_data.DisplayName", - "winlog.event_data.DomainBehaviorVersion", - "winlog.event_data.DomainName", - "winlog.event_data.DomainPolicyChanged", - "winlog.event_data.DomainSid", - "winlog.event_data.DriveName", - "winlog.event_data.DriverName", - "winlog.event_data.DriverNameLength", - "winlog.event_data.Dummy", - "winlog.event_data.DwordVal", - "winlog.event_data.EntryCount", - "winlog.event_data.EventSourceId", - "winlog.event_data.ExtraInfo", - "winlog.event_data.FailureName", - "winlog.event_data.FailureNameLength", - "winlog.event_data.FailureReason", - "winlog.event_data.FileVersion", - "winlog.event_data.FinalStatus", - "winlog.event_data.Group", - 
"winlog.event_data.GroupTypeChange", - "winlog.event_data.HandleId", - "winlog.event_data.HomeDirectory", - "winlog.event_data.HomePath", - "winlog.event_data.IdleImplementation", - "winlog.event_data.IdleStateCount", - "winlog.event_data.ImpersonationLevel", - "winlog.event_data.IntegrityLevel", - "winlog.event_data.IpAddress", - "winlog.event_data.IpPort", - "winlog.event_data.KerberosPolicyChange", - "winlog.event_data.KeyLength", - "winlog.event_data.LastBootGood", - "winlog.event_data.LastShutdownGood", - "winlog.event_data.LmPackageName", - "winlog.event_data.LogonGuid", - "winlog.event_data.LogonHours", - "winlog.event_data.LogonId", - "winlog.event_data.LogonID", - "winlog.event_data.LogonProcessName", - "winlog.event_data.LogonType", - "winlog.event_data.MachineAccountQuota", - "winlog.event_data.MajorVersion", - "winlog.event_data.MandatoryLabel", - "winlog.event_data.MaximumPerformancePercent", - "winlog.event_data.MemberName", - "winlog.event_data.MemberSid", - "winlog.event_data.MinimumPerformancePercent", - "winlog.event_data.MinimumThrottlePercent", - "winlog.event_data.MinorVersion", - "winlog.event_data.MixedDomainMode", - "winlog.event_data.NewProcessId", - "winlog.event_data.NewProcessName", - "winlog.event_data.NewSchemeGuid", - "winlog.event_data.NewSd", - "winlog.event_data.NewSdDacl0", - "winlog.event_data.NewSdDacl1", - "winlog.event_data.NewSdDacl2", - "winlog.event_data.NewSdSacl0", - "winlog.event_data.NewSdSacl1", - "winlog.event_data.NewSdSacl2", - "winlog.event_data.NewTargetUserName", - "winlog.event_data.NewTime", - "winlog.event_data.NewUACList", - "winlog.event_data.NewUacValue", - "winlog.event_data.NominalFrequency", - "winlog.event_data.Number", - "winlog.event_data.ObjectName", - "winlog.event_data.ObjectServer", - "winlog.event_data.ObjectType", - "winlog.event_data.OemInformation", - "winlog.event_data.OldSchemeGuid", - "winlog.event_data.OldSd", - "winlog.event_data.OldSdDacl0", - "winlog.event_data.OldSdDacl1", - 
"winlog.event_data.OldSdDacl2", - "winlog.event_data.OldSdSacl0", - "winlog.event_data.OldSdSacl1", - "winlog.event_data.OldSdSacl2", - "winlog.event_data.OldTargetUserName", - "winlog.event_data.OldTime", - "winlog.event_data.OldUacValue", - "winlog.event_data.OriginalFileName", - "winlog.event_data.PackageName", - "winlog.event_data.PasswordLastSet", - "winlog.event_data.PasswordHistoryLength", - "winlog.event_data.Path", - "winlog.event_data.ParentProcessName", - "winlog.event_data.PerformanceImplementation", - "winlog.event_data.PreviousCreationUtcTime", - "winlog.event_data.PreAuthType", - "winlog.event_data.PreviousTime", - "winlog.event_data.PrimaryGroupId", - "winlog.event_data.PrivilegeList", - "winlog.event_data.ProcessId", - "winlog.event_data.ProcessName", - "winlog.event_data.ProcessPath", - "winlog.event_data.ProcessPid", - "winlog.event_data.Product", - "winlog.event_data.ProfilePath", - "winlog.event_data.PuaCount", - "winlog.event_data.PuaPolicyId", - "winlog.event_data.QfeVersion", - "winlog.event_data.Reason", - "winlog.event_data.ResourceAttributes", - "winlog.event_data.SamAccountName", - "winlog.event_data.SchemaVersion", - "winlog.event_data.ScriptPath", - "winlog.event_data.SidHistory", - "winlog.event_data.ScriptBlockText", - "winlog.event_data.Service", - "winlog.event_data.ServiceAccount", - "winlog.event_data.ServiceFileName", - "winlog.event_data.ServiceName", - "winlog.event_data.ServiceSid", - "winlog.event_data.ServiceStartType", - "winlog.event_data.ServiceType", - "winlog.event_data.ServiceVersion", - "winlog.event_data.SessionName", - "winlog.event_data.ShutdownActionType", - "winlog.event_data.ShutdownEventCode", - "winlog.event_data.ShutdownReason", - "winlog.event_data.SidFilteringEnabled", - "winlog.event_data.Signature", - "winlog.event_data.SignatureStatus", - "winlog.event_data.Signed", - "winlog.event_data.StartTime", - "winlog.event_data.State", - "winlog.event_data.Status", - "winlog.event_data.StatusDescription", - 
"winlog.event_data.StopTime", - "winlog.event_data.SubCategory", - "winlog.event_data.SubCategoryGuid", - "winlog.event_data.SubcategoryGuid", - "winlog.event_data.SubCategoryId", - "winlog.event_data.SubcategoryId", - "winlog.event_data.SubjectDomainName", - "winlog.event_data.SubjectLogonId", - "winlog.event_data.SubjectUserName", - "winlog.event_data.SubjectUserSid", - "winlog.event_data.SubStatus", - "winlog.event_data.TSId", - "winlog.event_data.TargetDomainName", - "winlog.event_data.TargetInfo", - "winlog.event_data.TargetLogonGuid", - "winlog.event_data.TargetLogonId", - "winlog.event_data.TargetServerName", - "winlog.event_data.TargetSid", - "winlog.event_data.TargetUserName", - "winlog.event_data.TargetUserSid", - "winlog.event_data.TdoAttributes", - "winlog.event_data.TdoDirection", - "winlog.event_data.TdoType", - "winlog.event_data.TerminalSessionId", - "winlog.event_data.TicketEncryptionType", - "winlog.event_data.TicketEncryptionTypeDescription", - "winlog.event_data.TicketOptions", - "winlog.event_data.TicketOptionsDescription", - "winlog.event_data.TokenElevationType", - "winlog.event_data.TransmittedServices", - "winlog.event_data.UserAccountControl", - "winlog.event_data.UserParameters", - "winlog.event_data.UserPrincipalName", - "winlog.event_data.UserSid", - "winlog.event_data.UserWorkstations", - "winlog.event_data.Version", - "winlog.event_data.Workstation", - "winlog.event_data.WorkstationName", - "winlog.event_data.param1", - "winlog.event_data.param2", - "winlog.event_data.param3", - "winlog.event_data.param4", - "winlog.event_data.param5", - "winlog.event_data.param6", - "winlog.event_data.param7", - "winlog.event_data.param8", - "winlog.event_id", - "winlog.keywords", - "winlog.level", - "winlog.outcome", - "winlog.record_id", - "winlog.related_activity_id", - "winlog.opcode", - "winlog.provider_guid", - "winlog.provider_name", - "winlog.task", - "winlog.time_created", - "winlog.trustAttribute", - "winlog.trustDirection", - 
"winlog.trustType", - "winlog.user_data.BackupPath", - "winlog.user_data.Channel", - "winlog.user_data.SubjectDomainName", - "winlog.user_data.SubjectLogonId", - "winlog.user_data.SubjectUserName", - "winlog.user_data.SubjectUserSid", - "winlog.user_data.xml_name", - "winlog.user.identifier", - "winlog.user.name", - "winlog.user.domain", - "winlog.user.type" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "args": { - "ignore_above": 1024, - "type": "keyword" - }, - "parent": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "executable": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "pid": { - "type": "long" - }, - "args_count": { - "type": "long" - }, - "entity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "title": { - "ignore_above": 1024, - "type": "keyword" - }, - "command_line": { - "ignore_above": 1024, - "type": "wildcard" - }, - "executable": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "winlog": { - "properties": { - "related_activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "keywords": { - "ignore_above": 1024, - "type": "keyword" - }, - "logon": { - "properties": { - "failure": { - "properties": { - "reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "sub_status": { - "ignore_above": 1024, - "type": "keyword" - }, - "status": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "id": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "channel": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_data": { - "properties": { - "ProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "OriginalFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "Product": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonHours": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "FileVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "TicketOptions": { - "ignore_above": 1024, - "type": "keyword" - }, - "AllowedToDelegateTo": { - "ignore_above": 1024, - "type": "keyword" - }, - "TdoAttributes": { - "ignore_above": 1024, - "type": "keyword" - }, - "StopTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Status": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessMask": { - "ignore_above": 1024, - "type": "keyword" - }, - "KeyLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "ResourceAttributes": { - "ignore_above": 1024, - "type": "keyword" - }, - "SessionName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PasswordHistoryLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSd": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Group": { - "ignore_above": 1024, - "type": "keyword" - }, - "PackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownActionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "DwordVal": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMajor": { - "ignore_above": 1024, - "type": "keyword" - }, - "SidHistory": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "TransmittedServices": { - "ignore_above": 1024, - "type": "keyword" - }, - "WorkstationName": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleStateCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Path": { - "ignore_above": 1024, - "type": "keyword" - }, - "SchemaVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "CrashOnAuditFailValue": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMinor": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "HandleId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastShutdownGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpPort": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "LmPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastBootGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessListDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Version": { - "ignore_above": 1024, - "type": "keyword" - }, - "MachineAccountQuota": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldUacValue": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserParameters": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signed": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubCategoryId": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"OldTargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewUacValue": { - "ignore_above": 1024, - "type": "keyword" - }, - "CallerProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProfilePath": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "State": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Binary": { - "ignore_above": 1024, - "type": "keyword" - }, - "ImpersonationLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DomainPolicyChanged": { - "ignore_above": 1024, - "type": "keyword" - }, - "CategoryId": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreAuthType": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccountDomain": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewUACList": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubcategoryGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "SidFilteringEnabled": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetServerName": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuditPolicyChanges": { - "ignore_above": 1024, - "type": "keyword" - }, - "Number": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "EventSourceId": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriveName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessId": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "LogonType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ExtraInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "PrimaryGroupId": { - "ignore_above": 1024, - "type": "keyword" - }, - "ObjectName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "Workstation": { - "ignore_above": 1024, - "type": "keyword" - }, - "PasswordLastSet": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumThrottlePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "GroupTypeChange": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessList": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuthenticationPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NominalFrequency": { - "ignore_above": 1024, - "type": "keyword" - }, - "SignatureStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "DomainSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "TicketEncryptionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "TicketOptionsDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ObjectServer": { - "ignore_above": 1024, - "type": "keyword" - }, - "HomePath": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserWorkstations": { - "ignore_above": 1024, - "type": "keyword" - }, - "SamAccountName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "CorruptionActionState": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuditSourceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubCategoryGuid": { - "ignore_above": 1024, - "type": "keyword" - 
}, - "PreviousCreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuditPolicyChangesDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessMaskDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccountName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PerformanceImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "TicketEncryptionTypeDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceAccount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Description": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPid": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptBlockText": { - "ignore_above": 1024, - "type": "keyword" - }, - "ObjectType": { - "ignore_above": 1024, - "type": "keyword" - }, - "MaximumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "KerberosPolicyChange": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "FinalStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "MajorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "MandatoryLabel": { - "ignore_above": 1024, - "type": "keyword" - }, - "HomeDirectory": { - "ignore_above": 1024, - "type": "keyword" - }, - "TokenElevationType": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "QfeVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccountExpires": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceStartType": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserPrincipalName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdSacl1": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "Dummy": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdSacl0": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdSacl2": { - "ignore_above": 1024, - "type": "keyword" - }, - "Company": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaPolicyId": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdSacl2": { - "ignore_above": 1024, - "type": "keyword" - }, - "IntegrityLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdSacl1": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdSacl0": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSd": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewTargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "ClientName": { - "ignore_above": 1024, - "type": "keyword" - }, - "StatusDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdDacl0": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdDacl2": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdDacl1": { - "ignore_above": 1024, - "type": "keyword" - }, - "DomainBehaviorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessGranted": { - "ignore_above": 1024, - "type": "keyword" - }, - "ParentProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubcategoryId": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessRemoved": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownEventCode": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "MixedDomainMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "Detail": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "OldSdDacl1": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdDacl0": { - "ignore_above": 1024, - "type": "keyword" - }, - "Category": { - "ignore_above": 1024, - "type": "keyword" - }, - "TerminalSessionId": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdDacl2": { - "ignore_above": 1024, - "type": "keyword" - }, - "ClientAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "CreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "CallerProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TdoType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DisplayName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BuildVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TSId": { - "ignore_above": 1024, - "type": "keyword" - }, - "PrivilegeList": { - "ignore_above": 1024, - "type": "keyword" - }, - "param7": { - "ignore_above": 1024, - "type": "keyword" - }, - "param8": { - "ignore_above": 1024, - "type": "keyword" - }, - "param5": { - "ignore_above": 1024, - "type": "keyword" - }, - "param6": { - "ignore_above": 1024, - "type": "keyword" - }, - "Service": { - "ignore_above": 1024, - "type": "keyword" - }, - "TdoDirection": { - "ignore_above": 1024, - "type": "keyword" - }, - "param3": { - "ignore_above": 1024, - "type": "keyword" - }, - "param4": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "param1": { - "ignore_above": 1024, - "type": "keyword" - }, - "param2": { - "ignore_above": 1024, - "type": "keyword" - }, - "CommandLine": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserAccountControl": { - "ignore_above": 1024, - "type": "keyword" - }, - "OemInformation": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signature": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubCategory": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "EntryCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonID": { - "ignore_above": 1024, - "type": "keyword" - }, - "BitlockerUserInputTime": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "opcode": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_guid": { - "ignore_above": 1024, - "type": "keyword" - }, - "activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "time_created": { - "ignore_above": 1024, - "type": "keyword" - }, - "trustDirection": { - "ignore_above": 1024, - "type": "keyword" - }, - "api": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "outcome": { - "ignore_above": 1024, - "type": "keyword" - }, - "computer_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "process": { - "properties": { - "pid": { - "type": "long" - }, - "thread": { - "properties": { - "id": { - "type": "long" - } - } - } - } - }, - "trustAttribute": { - "ignore_above": 1024, - "type": "keyword" - }, - "level": { - "ignore_above": 1024, - "type": "keyword" - }, - "computerObject": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "user_data": { - "properties": { - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BackupPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "Channel": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "xml_name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "version": { - "type": "long" - }, - "record_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "task": { - "ignore_above": 1024, - "type": "keyword" - }, - "trustType": { - "ignore_above": 1024, - "type": "keyword" - }, - "user": { - "properties": { - "identifier": { - "ignore_above": 1024, - "type": "keyword" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "log": { - "properties": { - "file": { - "properties": { - "path": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "source": { - "properties": { - "port": { - "type": "long" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - } - } - }, - "message": { - "type": "match_only_text" - }, - "tags": { - "ignore_above": 1024, - "type": "keyword" - }, - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "input": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "related": { - "properties": { - "hosts": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "user": { - "ignore_above": 1024, - "type": "keyword" - }, - "hash": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword", - "value": "logs" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "service": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": 
"keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "sequence": { - "type": "long" - }, - "ingested": { - "type": "date" - }, - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "created": { - "type": "date" - }, - "kind": { - "ignore_above": 1024, - "type": "keyword" - }, - "module": { - "type": "constant_keyword", - "value": "system" - }, - "action": { - "ignore_above": 1024, - "type": "keyword" - }, - "category": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "dataset": { - "type": "constant_keyword", - "value": "system.security" - }, - "outcome": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "user": { - "properties": { - "effective": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "changes": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "target": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "group": { - 
"properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - } - }, - "group": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - }, - "_meta": { - "package": { - "name": "system" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@package.json deleted file mode 100644 index 30576a635..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.syslog@package.json +++ /dev/null @@ -1,327 +0,0 @@ -{ - "template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-system.syslog-1.6.4", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", 
- "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.os.full", - "host.type", - "event.action", - "event.category", - "event.code", - "event.kind", - "event.outcome", - "event.provider", - "event.type", - "ecs.version", - "message", - "process.name" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "pid": { - "type": "long" - } - } - }, - "@timestamp": { - "type": "date" - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - 
"namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword", - "value": "logs" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - }, - "full": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "sequence": { - "type": "long" - }, - "ingested": { - "type": "date" - }, - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "created": { - "type": "date" - }, - "kind": { - "ignore_above": 1024, - "type": "keyword" - }, - "module": { - "type": "constant_keyword", - "value": "system" - }, - "action": { - "ignore_above": 1024, - "type": "keyword" - }, - "category": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "dataset": { - "type": "constant_keyword", - "value": "system.syslog" - }, - "outcome": { - 
"ignore_above": 1024, - "type": "keyword" - } - } - }, - "message": { - "type": "match_only_text" - } - } - } - }, - "_meta": { - "package": { - "name": "system" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.system@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.system@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.system@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-system.system@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-system.system@package.json deleted file mode 100644 index 068e6846b..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-system.system@package.json +++ /dev/null @@ -1,986 +0,0 @@ -{ - "template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-system.system-1.6.4", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "event.action", - "event.category", - "event.code", - "event.kind", - "event.original", - "event.outcome", - "event.provider", - 
"event.type", - "error.message", - "message", - "winlog.api", - "winlog.activity_id", - "winlog.computer_name", - "winlog.event_data.AuthenticationPackageName", - "winlog.event_data.Binary", - "winlog.event_data.BitlockerUserInputTime", - "winlog.event_data.BootMode", - "winlog.event_data.BootType", - "winlog.event_data.BuildVersion", - "winlog.event_data.Company", - "winlog.event_data.CorruptionActionState", - "winlog.event_data.CreationUtcTime", - "winlog.event_data.Description", - "winlog.event_data.Detail", - "winlog.event_data.DeviceName", - "winlog.event_data.DeviceNameLength", - "winlog.event_data.DeviceTime", - "winlog.event_data.DeviceVersionMajor", - "winlog.event_data.DeviceVersionMinor", - "winlog.event_data.DriveName", - "winlog.event_data.DriverName", - "winlog.event_data.DriverNameLength", - "winlog.event_data.DwordVal", - "winlog.event_data.EntryCount", - "winlog.event_data.ExtraInfo", - "winlog.event_data.FailureName", - "winlog.event_data.FailureNameLength", - "winlog.event_data.FileVersion", - "winlog.event_data.FinalStatus", - "winlog.event_data.Group", - "winlog.event_data.IdleImplementation", - "winlog.event_data.IdleStateCount", - "winlog.event_data.ImpersonationLevel", - "winlog.event_data.IntegrityLevel", - "winlog.event_data.IpAddress", - "winlog.event_data.IpPort", - "winlog.event_data.KeyLength", - "winlog.event_data.LastBootGood", - "winlog.event_data.LastShutdownGood", - "winlog.event_data.LmPackageName", - "winlog.event_data.LogonGuid", - "winlog.event_data.LogonId", - "winlog.event_data.LogonProcessName", - "winlog.event_data.LogonType", - "winlog.event_data.MajorVersion", - "winlog.event_data.MaximumPerformancePercent", - "winlog.event_data.MemberName", - "winlog.event_data.MemberSid", - "winlog.event_data.MinimumPerformancePercent", - "winlog.event_data.MinimumThrottlePercent", - "winlog.event_data.MinorVersion", - "winlog.event_data.NewProcessId", - "winlog.event_data.NewProcessName", - "winlog.event_data.NewSchemeGuid", - 
"winlog.event_data.NewTime", - "winlog.event_data.NominalFrequency", - "winlog.event_data.Number", - "winlog.event_data.OldSchemeGuid", - "winlog.event_data.OldTime", - "winlog.event_data.OriginalFileName", - "winlog.event_data.Path", - "winlog.event_data.PerformanceImplementation", - "winlog.event_data.PreviousCreationUtcTime", - "winlog.event_data.PreviousTime", - "winlog.event_data.PrivilegeList", - "winlog.event_data.ProcessId", - "winlog.event_data.ProcessName", - "winlog.event_data.ProcessPath", - "winlog.event_data.ProcessPid", - "winlog.event_data.Product", - "winlog.event_data.PuaCount", - "winlog.event_data.PuaPolicyId", - "winlog.event_data.QfeVersion", - "winlog.event_data.Reason", - "winlog.event_data.SchemaVersion", - "winlog.event_data.ScriptBlockText", - "winlog.event_data.ServiceName", - "winlog.event_data.ServiceVersion", - "winlog.event_data.ShutdownActionType", - "winlog.event_data.ShutdownEventCode", - "winlog.event_data.ShutdownReason", - "winlog.event_data.Signature", - "winlog.event_data.SignatureStatus", - "winlog.event_data.Signed", - "winlog.event_data.StartTime", - "winlog.event_data.State", - "winlog.event_data.Status", - "winlog.event_data.StopTime", - "winlog.event_data.SubjectDomainName", - "winlog.event_data.SubjectLogonId", - "winlog.event_data.SubjectUserName", - "winlog.event_data.SubjectUserSid", - "winlog.event_data.TSId", - "winlog.event_data.TargetDomainName", - "winlog.event_data.TargetInfo", - "winlog.event_data.TargetLogonGuid", - "winlog.event_data.TargetLogonId", - "winlog.event_data.TargetServerName", - "winlog.event_data.TargetUserName", - "winlog.event_data.TargetUserSid", - "winlog.event_data.TerminalSessionId", - "winlog.event_data.TokenElevationType", - "winlog.event_data.TransmittedServices", - "winlog.event_data.UserSid", - "winlog.event_data.Version", - "winlog.event_data.Workstation", - "winlog.event_data.param1", - "winlog.event_data.param2", - "winlog.event_data.param3", - "winlog.event_data.param4", - 
"winlog.event_data.param5", - "winlog.event_data.param6", - "winlog.event_data.param7", - "winlog.event_data.param8", - "winlog.event_id", - "winlog.keywords", - "winlog.channel", - "winlog.record_id", - "winlog.related_activity_id", - "winlog.opcode", - "winlog.provider_guid", - "winlog.provider_name", - "winlog.task", - "winlog.user.identifier", - "winlog.user.name", - "winlog.user.domain", - "winlog.user.type" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - }, - { - "winlog.user_data": { - "path_match": "winlog.user_data.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "winlog": { - "properties": { - "related_activity_id": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "computer_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "process": { - "properties": { - "pid": { - "type": "long" - }, - "thread": { - "properties": { - "id": { - "type": "long" - } - } - } - } - }, - "keywords": { - "ignore_above": 1024, - "type": "keyword" - }, - "channel": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_data": { - "properties": { - "SignatureStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "OriginalFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "Product": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "FileVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "StopTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Status": { - "ignore_above": 1024, - "type": "keyword" - }, - "CorruptionActionState": { - "ignore_above": 1024, - "type": "keyword" - }, - "KeyLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousCreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "PerformanceImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Group": { - "ignore_above": 1024, - "type": "keyword" - }, - "Description": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownActionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "DwordVal": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"ProcessPid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMajor": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptBlockText": { - "ignore_above": 1024, - "type": "keyword" - }, - "TransmittedServices": { - "ignore_above": 1024, - "type": "keyword" - }, - "MaximumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "FinalStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleStateCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "MajorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "Path": { - "ignore_above": 1024, - "type": "keyword" - }, - "SchemaVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "TokenElevationType": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "QfeVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMinor": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Company": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaPolicyId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IntegrityLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastShutdownGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpPort": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "LmPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserSid": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "LastBootGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Version": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signed": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownEventCode": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "State": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Binary": { - "ignore_above": 1024, - "type": "keyword" - }, - "ImpersonationLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Detail": { - "ignore_above": 1024, - "type": "keyword" - }, - "TerminalSessionId": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "CreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetServerName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Number": { - "ignore_above": 1024, - "type": "keyword" - }, - "BuildVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "MinimumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TSId": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PrivilegeList": { - "ignore_above": 1024, - "type": "keyword" - }, - "param7": { - "ignore_above": 1024, - "type": "keyword" - }, - "param8": { - "ignore_above": 1024, - "type": "keyword" - }, - "param5": { - "ignore_above": 1024, - "type": "keyword" - }, - "param6": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriveName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ExtraInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "param3": { - "ignore_above": 1024, - "type": "keyword" - }, - "param4": { - "ignore_above": 1024, - "type": "keyword" - }, - "param1": { - "ignore_above": 1024, - "type": "keyword" - }, - "param2": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "Workstation": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signature": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumThrottlePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "EntryCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "BitlockerUserInputTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuthenticationPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"NominalFrequency": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "opcode": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "type": "long" - }, - "record_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "task": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_guid": { - "ignore_above": 1024, - "type": "keyword" - }, - "activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "api": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "user": { - "properties": { - "identifier": { - "ignore_above": 1024, - "type": "keyword" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "original": { - "ignore_above": 1024, - "type": "keyword" - }, - "created": { - "type": "date" - }, - "kind": { - "ignore_above": 1024, - "type": "keyword" - }, - "module": { - "type": "constant_keyword", - "value": "system" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "sequence": { - "type": "long" - }, - "ingested": { - "type": "date" - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "action": { - "ignore_above": 1024, - "type": "keyword" - }, - "category": { - "ignore_above": 1024, - "type": "keyword" - }, - "dataset": { - "type": "constant_keyword", - "value": "system.system" - }, - "outcome": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "error": { - "properties": { - "message": { - "type": "match_only_text" - } - } - }, - "message": { - "type": "match_only_text" - } - } - } - }, - "_meta": { - "package": { - "name": "system" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.forwarded@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-windows.forwarded@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.forwarded@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.forwarded@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-windows.forwarded@package.json deleted file mode 
100644 index 967641107..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.forwarded@package.json +++ /dev/null @@ -1,2544 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-windows.forwarded-1.20.1", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "analysis": { - "analyzer": { - "powershell_script_analyzer": { - "pattern": "[\\W&&[^-]]+", - "type": "pattern" - } - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "event.action", - "event.category", - "event.code", - "event.kind", - "event.outcome", - "event.provider", - "event.type", - "tags", - "input.type", - "destination.domain", - "destination.user.domain", - "destination.user.id", - "destination.user.name", - "dns.answers.class", - "dns.answers.data", - "dns.answers.name", - "dns.answers.type", - "dns.header_flags", - "dns.id", - "dns.op_code", - "dns.question.class", - "dns.question.name", - "dns.question.registered_domain", - "dns.question.subdomain", - "dns.question.top_level_domain", - "dns.question.type", - "dns.response_code", - "dns.type", - "ecs.version", - "file.code_signature.status", - "file.code_signature.subject_name", - "file.directory", - "file.extension", - "file.hash.md5", - "file.hash.sha1", - "file.hash.sha256", - "file.hash.sha512", - "file.name", - "file.path", - "file.pe.architecture", - "file.pe.company", - "file.pe.description", - 
"file.pe.file_version", - "file.pe.imphash", - "file.pe.original_file_name", - "file.pe.product", - "group.domain", - "group.id", - "group.name", - "log.file.path", - "log.level", - "message", - "network.community_id", - "network.direction", - "network.protocol", - "network.transport", - "network.type", - "process.args", - "process.command_line", - "process.entity_id", - "process.executable", - "process.hash.md5", - "process.hash.sha1", - "process.hash.sha256", - "process.hash.sha512", - "process.name", - "process.parent.args", - "process.parent.command_line", - "process.parent.entity_id", - "process.parent.executable", - "process.parent.hash.md5", - "process.parent.hash.sha1", - "process.parent.hash.sha256", - "process.parent.hash.sha512", - "process.parent.name", - "process.parent.pe.architecture", - "process.parent.pe.company", - "process.parent.pe.description", - "process.parent.pe.file_version", - "process.parent.pe.imphash", - "process.parent.pe.original_file_name", - "process.parent.pe.product", - "process.parent.title", - "process.pe.architecture", - "process.pe.company", - "process.pe.description", - "process.pe.file_version", - "process.pe.imphash", - "process.pe.original_file_name", - "process.pe.product", - "process.title", - "process.working_directory", - "registry.data.strings", - "registry.data.type", - "registry.hive", - "registry.key", - "registry.path", - "registry.value", - "related.hash", - "related.hosts", - "related.user", - "rule.name", - "service.name", - "service.type", - "source.domain", - "source.user.domain", - "source.user.id", - "source.user.name", - "user.domain", - "user.id", - "user.name", - "user.target.group.domain", - "user.target.group.id", - "user.target.group.name", - "user.target.name", - "sysmon.dns.status", - "winlog.logon.type", - "winlog.logon.id", - "winlog.logon.failure.reason", - "winlog.logon.failure.status", - "winlog.logon.failure.sub_status", - "winlog.api", - "winlog.activity_id", - "winlog.computer_name", - 
"winlog.level", - "winlog.outcome", - "winlog.trustAttribute", - "winlog.trustDirection", - "winlog.trustType", - "winlog.computerObject.domain", - "winlog.computerObject.id", - "winlog.computerObject.name", - "winlog.event_data.AccessGranted", - "winlog.event_data.AccessMask", - "winlog.event_data.AccessMaskDescription", - "winlog.event_data.AccessRemoved", - "winlog.event_data.AccountDomain", - "winlog.event_data.AccountExpires", - "winlog.event_data.AccountName", - "winlog.event_data.AllowedToDelegateTo", - "winlog.event_data.AuditPolicyChanges", - "winlog.event_data.AuditPolicyChangesDescription", - "winlog.event_data.AuditSourceName", - "winlog.event_data.AuthenticationPackageName", - "winlog.event_data.Binary", - "winlog.event_data.BitlockerUserInputTime", - "winlog.event_data.BootMode", - "winlog.event_data.BootType", - "winlog.event_data.BuildVersion", - "winlog.event_data.CallerProcessId", - "winlog.event_data.CallerProcessName", - "winlog.event_data.Category", - "winlog.event_data.CategoryId", - "winlog.event_data.ClientAddress", - "winlog.event_data.ClientInfo", - "winlog.event_data.ClientName", - "winlog.event_data.CommandLine", - "winlog.event_data.Company", - "winlog.event_data.ComputerAccountChange", - "winlog.event_data.Configuration", - "winlog.event_data.CorruptionActionState", - "winlog.event_data.CrashOnAuditFailValue", - "winlog.event_data.CreationUtcTime", - "winlog.event_data.Description", - "winlog.event_data.Detail", - "winlog.event_data.DeviceName", - "winlog.event_data.DeviceNameLength", - "winlog.event_data.DeviceTime", - "winlog.event_data.DeviceVersionMajor", - "winlog.event_data.DeviceVersionMinor", - "winlog.event_data.DisplayName", - "winlog.event_data.DnsHostName", - "winlog.event_data.DomainBehaviorVersion", - "winlog.event_data.DomainName", - "winlog.event_data.DomainPolicyChanged", - "winlog.event_data.DomainSid", - "winlog.event_data.DriveName", - "winlog.event_data.DriverName", - "winlog.event_data.DriverNameLength", - 
"winlog.event_data.Dummy", - "winlog.event_data.DwordVal", - "winlog.event_data.EntryCount", - "winlog.event_data.EventSourceId", - "winlog.event_data.EventType", - "winlog.event_data.ExtraInfo", - "winlog.event_data.FailureName", - "winlog.event_data.FailureNameLength", - "winlog.event_data.FailureReason", - "winlog.event_data.FileVersion", - "winlog.event_data.FinalStatus", - "winlog.event_data.Group", - "winlog.event_data.GroupTypeChange", - "winlog.event_data.HandleId", - "winlog.event_data.HomeDirectory", - "winlog.event_data.HomePath", - "winlog.event_data.IdleImplementation", - "winlog.event_data.IdleStateCount", - "winlog.event_data.ImpersonationLevel", - "winlog.event_data.IntegrityLevel", - "winlog.event_data.IpAddress", - "winlog.event_data.IpPort", - "winlog.event_data.KerberosPolicyChange", - "winlog.event_data.KeyLength", - "winlog.event_data.LastBootGood", - "winlog.event_data.LastShutdownGood", - "winlog.event_data.LmPackageName", - "winlog.event_data.LogonGuid", - "winlog.event_data.LogonHours", - "winlog.event_data.LogonId", - "winlog.event_data.LogonID", - "winlog.event_data.LogonProcessName", - "winlog.event_data.LogonType", - "winlog.event_data.MachineAccountQuota", - "winlog.event_data.MajorVersion", - "winlog.event_data.MandatoryLabel", - "winlog.event_data.MaximumPerformancePercent", - "winlog.event_data.MemberName", - "winlog.event_data.MemberSid", - "winlog.event_data.MinimumPerformancePercent", - "winlog.event_data.MinimumThrottlePercent", - "winlog.event_data.MinorVersion", - "winlog.event_data.MixedDomainMode", - "winlog.event_data.NewProcessId", - "winlog.event_data.NewProcessName", - "winlog.event_data.NewSchemeGuid", - "winlog.event_data.NewSd", - "winlog.event_data.NewSdDacl0", - "winlog.event_data.NewSdDacl1", - "winlog.event_data.NewSdDacl2", - "winlog.event_data.NewSdSacl0", - "winlog.event_data.NewSdSacl1", - "winlog.event_data.NewSdSacl2", - "winlog.event_data.NewTargetUserName", - "winlog.event_data.NewTime", - 
"winlog.event_data.NewUACList", - "winlog.event_data.NewUacValue", - "winlog.event_data.NominalFrequency", - "winlog.event_data.Number", - "winlog.event_data.ObjectName", - "winlog.event_data.ObjectServer", - "winlog.event_data.ObjectType", - "winlog.event_data.OemInformation", - "winlog.event_data.OldSchemeGuid", - "winlog.event_data.OldSd", - "winlog.event_data.OldSdDacl0", - "winlog.event_data.OldSdDacl1", - "winlog.event_data.OldSdDacl2", - "winlog.event_data.OldSdSacl0", - "winlog.event_data.OldSdSacl1", - "winlog.event_data.OldSdSacl2", - "winlog.event_data.OldTargetUserName", - "winlog.event_data.OldTime", - "winlog.event_data.OldUacValue", - "winlog.event_data.OriginalFileName", - "winlog.event_data.PackageName", - "winlog.event_data.PasswordLastSet", - "winlog.event_data.PasswordHistoryLength", - "winlog.event_data.Path", - "winlog.event_data.ParentProcessName", - "winlog.event_data.PerformanceImplementation", - "winlog.event_data.PreviousCreationUtcTime", - "winlog.event_data.PreAuthType", - "winlog.event_data.PreviousTime", - "winlog.event_data.PrimaryGroupId", - "winlog.event_data.PrivilegeList", - "winlog.event_data.ProcessId", - "winlog.event_data.ProcessName", - "winlog.event_data.ProcessPath", - "winlog.event_data.ProcessPid", - "winlog.event_data.Product", - "winlog.event_data.ProfilePath", - "winlog.event_data.PuaCount", - "winlog.event_data.PuaPolicyId", - "winlog.event_data.QfeVersion", - "winlog.event_data.Reason", - "winlog.event_data.SamAccountName", - "winlog.event_data.SchemaVersion", - "winlog.event_data.ScriptPath", - "winlog.event_data.Session", - "winlog.event_data.SidHistory", - "winlog.event_data.ScriptBlockText", - "winlog.event_data.Service", - "winlog.event_data.ServiceAccount", - "winlog.event_data.ServiceFileName", - "winlog.event_data.ServiceName", - "winlog.event_data.ServicePrincipalNames", - "winlog.event_data.ServiceSid", - "winlog.event_data.ServiceStartType", - "winlog.event_data.ServiceType", - 
"winlog.event_data.ServiceVersion", - "winlog.event_data.SessionName", - "winlog.event_data.ShutdownActionType", - "winlog.event_data.ShutdownEventCode", - "winlog.event_data.ShutdownReason", - "winlog.event_data.SidFilteringEnabled", - "winlog.event_data.Signature", - "winlog.event_data.SignatureStatus", - "winlog.event_data.Signed", - "winlog.event_data.StartTime", - "winlog.event_data.State", - "winlog.event_data.Status", - "winlog.event_data.StatusDescription", - "winlog.event_data.StopTime", - "winlog.event_data.SubCategory", - "winlog.event_data.SubCategoryGuid", - "winlog.event_data.SubcategoryGuid", - "winlog.event_data.SubCategoryId", - "winlog.event_data.SubcategoryId", - "winlog.event_data.SubjectDomainName", - "winlog.event_data.SubjectLogonId", - "winlog.event_data.SubjectUserName", - "winlog.event_data.SubjectUserSid", - "winlog.event_data.SubStatus", - "winlog.event_data.TSId", - "winlog.event_data.TargetDomainName", - "winlog.event_data.TargetInfo", - "winlog.event_data.TargetLogonGuid", - "winlog.event_data.TargetLogonId", - "winlog.event_data.TargetServerName", - "winlog.event_data.TargetSid", - "winlog.event_data.TargetUserName", - "winlog.event_data.TargetUserSid", - "winlog.event_data.TdoAttributes", - "winlog.event_data.TdoDirection", - "winlog.event_data.TdoType", - "winlog.event_data.TerminalSessionId", - "winlog.event_data.TicketEncryptionType", - "winlog.event_data.TicketEncryptionTypeDescription", - "winlog.event_data.TicketOptions", - "winlog.event_data.TicketOptionsDescription", - "winlog.event_data.TokenElevationType", - "winlog.event_data.TransmittedServices", - "winlog.event_data.UserAccountControl", - "winlog.event_data.UserParameters", - "winlog.event_data.UserPrincipalName", - "winlog.event_data.UserSid", - "winlog.event_data.UserWorkstations", - "winlog.event_data.Version", - "winlog.event_data.Workstation", - "winlog.event_data.WorkstationName", - "winlog.event_data.param1", - "winlog.event_data.param2", - 
"winlog.event_data.param3", - "winlog.event_data.param4", - "winlog.event_data.param5", - "winlog.event_data.param6", - "winlog.event_data.param7", - "winlog.event_data.param8", - "winlog.event_id", - "winlog.keywords", - "winlog.channel", - "winlog.record_id", - "winlog.related_activity_id", - "winlog.opcode", - "winlog.provider_guid", - "winlog.provider_name", - "winlog.task", - "winlog.user_data.BackupPath", - "winlog.user_data.Channel", - "winlog.user_data.SubjectDomainName", - "winlog.user_data.SubjectLogonId", - "winlog.user_data.SubjectUserName", - "winlog.user_data.SubjectUserSid", - "winlog.user_data.xml_name", - "winlog.user.identifier", - "winlog.user.name", - "winlog.user.domain", - "winlog.user.type", - "powershell.id", - "powershell.pipeline_id", - "powershell.runspace_id", - "powershell.command.path", - "powershell.command.name", - "powershell.command.type", - "powershell.command.value", - "powershell.connected_user.domain", - "powershell.connected_user.name", - "powershell.engine.version", - "powershell.engine.previous_state", - "powershell.engine.new_state", - "powershell.file.script_block_id", - "powershell.file.script_block_text", - "powershell.process.executable_version", - "powershell.provider.new_state", - "powershell.provider.name" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "sysmon": { - "properties": { - "file": { - "properties": { - "archived": { - "type": "boolean" - }, - "is_executable": { - "type": "boolean" - } - } - }, - "dns": { - "properties": { - "status": { - "ignore_above": 1024, - "type": 
"keyword" - } - } - } - } - }, - "log": { - "properties": { - "file": { - "properties": { - "path": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "destination": { - "properties": { - "port": { - "type": "long" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "rule": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "source": { - "properties": { - "port": { - "type": "long" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "network": { - "properties": { - "community_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "protocol": { - "ignore_above": 1024, - "type": "keyword" - }, - "transport": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "direction": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "file": { - "properties": { - "path": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "extension": { - "ignore_above": 1024, - "type": "keyword" - }, - "code_signature": { - "properties": { - "valid": { - "type": "boolean" - }, - "trusted": { - "type": "boolean" - }, - "subject_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "exists": { - "type": "boolean" - }, - "status": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "pe": { - "properties": { - "file_version": { - "ignore_above": 1024, - "type": "keyword" - }, - "product": { - "ignore_above": 1024, - "type": "keyword" - }, - "imphash": { - "ignore_above": 1024, - "type": "keyword" - }, - "description": { - "ignore_above": 1024, - "type": "keyword" - }, - "company": { - "ignore_above": 1024, - "type": "keyword" - }, - "original_file_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "directory": { - "ignore_above": 1024, - "type": "keyword" - }, - "hash": { - "properties": { - "sha1": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha256": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha512": { - "ignore_above": 1024, - "type": "keyword" - }, - "md5": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "related": { - "properties": { 
- "hosts": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "user": { - "ignore_above": 1024, - "type": "keyword" - }, - "hash": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "powershell": { - "properties": { - "sequence": { - "type": "long" - }, - "total": { - "type": "long" - }, - "connected_user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "executable_version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "file": { - "properties": { - "script_block_text": { - "search_analyzer": "powershell_script_analyzer", - "analyzer": "powershell_script_analyzer", - "type": "text" - }, - "script_block_id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "engine": { - "properties": { - "previous_state": { - "ignore_above": 1024, - "type": "keyword" - }, - "new_state": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "new_state": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "runspace_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "pipeline_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "command": { - "properties": { - "path": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "value": { - "type": "text" - } - } - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "sequence": { - "type": "long" - }, - "ingested": { - "type": "date" - }, - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "created": { - "type": "date" - }, - "kind": { - "ignore_above": 1024, - "type": "keyword" - }, - "module": { - "type": "constant_keyword", - "value": "windows" - }, - "action": { - "ignore_above": 1024, - "type": "keyword" - }, - "category": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "dataset": { - "type": "constant_keyword", - "value": "windows.forwarded" - }, - "outcome": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "group": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "registry": { - "properties": { - "hive": { - "ignore_above": 1024, - "type": "keyword" - }, - "path": { - "ignore_above": 1024, - "type": "keyword" - }, - "data": { - "properties": { - "strings": { 
- "ignore_above": 1024, - "type": "wildcard" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "value": { - "ignore_above": 1024, - "type": "keyword" - }, - "key": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "args": { - "ignore_above": 1024, - "type": "keyword" - }, - "parent": { - "properties": { - "args": { - "ignore_above": 1024, - "type": "keyword" - }, - "pe": { - "properties": { - "file_version": { - "ignore_above": 1024, - "type": "keyword" - }, - "product": { - "ignore_above": 1024, - "type": "keyword" - }, - "imphash": { - "ignore_above": 1024, - "type": "keyword" - }, - "description": { - "ignore_above": 1024, - "type": "keyword" - }, - "company": { - "ignore_above": 1024, - "type": "keyword" - }, - "original_file_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "start": { - "type": "date" - }, - "pid": { - "type": "long" - }, - "args_count": { - "type": "long" - }, - "entity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "title": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "command_line": { - "ignore_above": 1024, - "type": "wildcard", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "executable": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "hash": { - "properties": { - "sha1": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha256": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha512": { - "ignore_above": 1024, - "type": "keyword" - }, - "md5": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "pe": { - "properties": { - "file_version": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "product": { - "ignore_above": 1024, - "type": "keyword" - }, - "imphash": { - "ignore_above": 1024, - "type": "keyword" - }, - "description": { - "ignore_above": 1024, - "type": "keyword" - }, - "company": { - "ignore_above": 1024, - "type": "keyword" - }, - "original_file_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "pid": { - "type": "long" - }, - "working_directory": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "args_count": { - "type": "long" - }, - "entity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "title": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "command_line": { - "ignore_above": 1024, - "type": "wildcard", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "executable": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "hash": { - "properties": { - "sha1": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha256": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha512": { - "ignore_above": 1024, - "type": "keyword" - }, - "md5": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "winlog": { - "properties": { - "related_activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "keywords": { - "ignore_above": 1024, - "type": "keyword" - }, - "logon": { - "properties": { - "failure": { - "properties": { - "reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "sub_status": { - "ignore_above": 1024, - "type": "keyword" - }, - "status": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "id": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "channel": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_data": { - "properties": { - "ProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Configuration": { - "ignore_above": 1024, - "type": "keyword" - }, - "OriginalFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "Product": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonHours": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "FileVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "TicketOptions": { - "ignore_above": 1024, - "type": "keyword" - }, - "AllowedToDelegateTo": { - "ignore_above": 1024, - "type": "keyword" - }, - "TdoAttributes": { - "ignore_above": 1024, - "type": "keyword" - }, - "StopTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Status": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessMask": { - "ignore_above": 1024, - "type": "keyword" - }, - "KeyLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "SessionName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PasswordHistoryLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSd": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Group": { - "ignore_above": 1024, - "type": "keyword" - }, - "PackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownActionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "DwordVal": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMajor": { - "ignore_above": 1024, - "type": "keyword" - }, - "SidHistory": { - "ignore_above": 
1024, - "type": "keyword" - }, - "TransmittedServices": { - "ignore_above": 1024, - "type": "keyword" - }, - "WorkstationName": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleStateCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Path": { - "ignore_above": 1024, - "type": "keyword" - }, - "SchemaVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "CrashOnAuditFailValue": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMinor": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "HandleId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "DnsHostName": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastShutdownGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpPort": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "LmPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastBootGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Version": { - "ignore_above": 1024, - "type": "keyword" - }, - "MachineAccountQuota": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldUacValue": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserParameters": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signed": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubCategoryId": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldTargetUserName": { - "ignore_above": 
1024, - "type": "keyword" - }, - "NewUacValue": { - "ignore_above": 1024, - "type": "keyword" - }, - "CallerProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProfilePath": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "State": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "ComputerAccountChange": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Binary": { - "ignore_above": 1024, - "type": "keyword" - }, - "ImpersonationLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DomainPolicyChanged": { - "ignore_above": 1024, - "type": "keyword" - }, - "CategoryId": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreAuthType": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccountDomain": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewUACList": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubcategoryGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "SidFilteringEnabled": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetServerName": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuditPolicyChanges": { - "ignore_above": 1024, - "type": "keyword" - }, - "Number": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "EventSourceId": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriveName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessId": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "LogonType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ExtraInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "PrimaryGroupId": { - "ignore_above": 1024, - "type": "keyword" - }, - "ObjectName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "Workstation": { - "ignore_above": 1024, - "type": "keyword" - }, - "PasswordLastSet": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumThrottlePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "GroupTypeChange": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuthenticationPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NominalFrequency": { - "ignore_above": 1024, - "type": "keyword" - }, - "SignatureStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "DomainSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "TicketEncryptionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "TicketOptionsDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ObjectServer": { - "ignore_above": 1024, - "type": "keyword" - }, - "HomePath": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserWorkstations": { - "ignore_above": 1024, - "type": "keyword" - }, - "SamAccountName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "CorruptionActionState": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuditSourceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubCategoryGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousCreationUtcTime": { 
- "ignore_above": 1024, - "type": "keyword" - }, - "ServiceVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuditPolicyChangesDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessMaskDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccountName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PerformanceImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "TicketEncryptionTypeDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceAccount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Description": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPid": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptBlockText": { - "ignore_above": 1024, - "type": "keyword" - }, - "ObjectType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServicePrincipalNames": { - "ignore_above": 1024, - "type": "keyword" - }, - "MaximumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "KerberosPolicyChange": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "FinalStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "MajorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "MandatoryLabel": { - "ignore_above": 1024, - "type": "keyword" - }, - "HomeDirectory": { - "ignore_above": 1024, - "type": "keyword" - }, - "TokenElevationType": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "QfeVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccountExpires": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceStartType": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserPrincipalName": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "NewSdSacl1": { - "ignore_above": 1024, - "type": "keyword" - }, - "Dummy": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdSacl0": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdSacl2": { - "ignore_above": 1024, - "type": "keyword" - }, - "Company": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaPolicyId": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdSacl2": { - "ignore_above": 1024, - "type": "keyword" - }, - "EventType": { - "ignore_above": 1024, - "type": "keyword" - }, - "IntegrityLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdSacl1": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdSacl0": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSd": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewTargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "ClientName": { - "ignore_above": 1024, - "type": "keyword" - }, - "StatusDescription": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdDacl0": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdDacl2": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSdDacl1": { - "ignore_above": 1024, - "type": "keyword" - }, - "DomainBehaviorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessGranted": { - "ignore_above": 1024, - "type": "keyword" - }, - "ParentProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubcategoryId": { - "ignore_above": 1024, - "type": "keyword" - }, - "AccessRemoved": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownEventCode": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousTime": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "MixedDomainMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "ClientInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "Detail": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdDacl1": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdDacl0": { - "ignore_above": 1024, - "type": "keyword" - }, - "Category": { - "ignore_above": 1024, - "type": "keyword" - }, - "TerminalSessionId": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSdDacl2": { - "ignore_above": 1024, - "type": "keyword" - }, - "ClientAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "CreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "CallerProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TdoType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DisplayName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BuildVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TSId": { - "ignore_above": 1024, - "type": "keyword" - }, - "PrivilegeList": { - "ignore_above": 1024, - "type": "keyword" - }, - "param7": { - "ignore_above": 1024, - "type": "keyword" - }, - "param8": { - "ignore_above": 1024, - "type": "keyword" - }, - "param5": { - "ignore_above": 1024, - "type": "keyword" - }, - "param6": { - "ignore_above": 1024, - "type": "keyword" - }, - "Service": { - "ignore_above": 1024, - "type": "keyword" - }, 
- "TdoDirection": { - "ignore_above": 1024, - "type": "keyword" - }, - "param3": { - "ignore_above": 1024, - "type": "keyword" - }, - "param4": { - "ignore_above": 1024, - "type": "keyword" - }, - "param1": { - "ignore_above": 1024, - "type": "keyword" - }, - "param2": { - "ignore_above": 1024, - "type": "keyword" - }, - "CommandLine": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserAccountControl": { - "ignore_above": 1024, - "type": "keyword" - }, - "OemInformation": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signature": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubCategory": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "EntryCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonID": { - "ignore_above": 1024, - "type": "keyword" - }, - "BitlockerUserInputTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Session": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "opcode": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_guid": { - "ignore_above": 1024, - "type": "keyword" - }, - "activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "time_created": { - "type": "date" - }, - "trustDirection": { - "ignore_above": 1024, - "type": "keyword" - }, - "api": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "outcome": { - "ignore_above": 1024, - "type": "keyword" - }, - "computer_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "trustAttribute": { - "ignore_above": 1024, - "type": "keyword" - }, - "process": { - "properties": { - "pid": { - "type": "long" - }, - "thread": { - "properties": { - "id": { - "type": "long" - 
} - } - } - } - }, - "level": { - "ignore_above": 1024, - "type": "keyword" - }, - "computerObject": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "user_data": { - "properties": { - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BackupPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "Channel": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "xml_name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "version": { - "type": "long" - }, - "record_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "task": { - "ignore_above": 1024, - "type": "keyword" - }, - "trustType": { - "ignore_above": 1024, - "type": "keyword" - }, - "user": { - "properties": { - "identifier": { - "ignore_above": 1024, - "type": "keyword" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "dns": { - "properties": { - "op_code": { - "ignore_above": 1024, - "type": "keyword" - }, - "resolved_ip": { - "type": "ip" - }, - "response_code": { - "ignore_above": 1024, - "type": "keyword" - }, - "question": { - "properties": { - "registered_domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "top_level_domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "subdomain": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - 
}, - "class": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "answers": { - "properties": { - "data": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "class": { - "ignore_above": 1024, - "type": "keyword" - }, - "ttl": { - "type": "long" - } - } - }, - "header_flags": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "message": { - "type": "match_only_text" - }, - "tags": { - "ignore_above": 1024, - "type": "keyword" - }, - "input": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "service": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "dataset": { - "properties": { - "name": { - "type": "constant_keyword" - }, - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - } - } - }, - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "target": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "group": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - 
"type": "keyword" - } - } - } - } - } - } - } - } - } - }, - "_meta": { - "package": { - "name": "windows" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell@package.json deleted file mode 100644 index ad0ff857e..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell@package.json +++ /dev/null @@ -1,1335 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-windows.powershell-1.20.1", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "analysis": { - "analyzer": { - "powershell_script_analyzer": { - "pattern": "[\\W&&[^-]]+", - "type": "pattern" - } - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "event.action", - 
"event.category", - "event.code", - "event.kind", - "event.outcome", - "event.provider", - "event.type", - "tags", - "input.type", - "destination.user.domain", - "destination.user.id", - "destination.user.name", - "ecs.version", - "file.directory", - "file.extension", - "file.name", - "file.path", - "log.level", - "message", - "process.args", - "process.command_line", - "process.entity_id", - "process.executable", - "process.name", - "process.title", - "related.hash", - "related.hosts", - "related.user", - "source.user.domain", - "source.user.id", - "source.user.name", - "user.domain", - "user.id", - "user.name", - "powershell.id", - "powershell.pipeline_id", - "powershell.runspace_id", - "powershell.command.path", - "powershell.command.name", - "powershell.command.type", - "powershell.command.value", - "powershell.connected_user.domain", - "powershell.connected_user.name", - "powershell.engine.version", - "powershell.engine.previous_state", - "powershell.engine.new_state", - "powershell.file.script_block_id", - "powershell.file.script_block_text", - "powershell.process.executable_version", - "powershell.provider.new_state", - "powershell.provider.name", - "winlog.api", - "winlog.activity_id", - "winlog.computer_name", - "winlog.event_data.AuthenticationPackageName", - "winlog.event_data.Binary", - "winlog.event_data.BitlockerUserInputTime", - "winlog.event_data.BootMode", - "winlog.event_data.BootType", - "winlog.event_data.BuildVersion", - "winlog.event_data.Company", - "winlog.event_data.CorruptionActionState", - "winlog.event_data.CreationUtcTime", - "winlog.event_data.Description", - "winlog.event_data.Detail", - "winlog.event_data.DeviceName", - "winlog.event_data.DeviceNameLength", - "winlog.event_data.DeviceTime", - "winlog.event_data.DeviceVersionMajor", - "winlog.event_data.DeviceVersionMinor", - "winlog.event_data.DriveName", - "winlog.event_data.DriverName", - "winlog.event_data.DriverNameLength", - "winlog.event_data.DwordVal", - 
"winlog.event_data.EntryCount", - "winlog.event_data.ExtraInfo", - "winlog.event_data.FailureName", - "winlog.event_data.FailureNameLength", - "winlog.event_data.FileVersion", - "winlog.event_data.FinalStatus", - "winlog.event_data.Group", - "winlog.event_data.IdleImplementation", - "winlog.event_data.IdleStateCount", - "winlog.event_data.ImpersonationLevel", - "winlog.event_data.IntegrityLevel", - "winlog.event_data.IpAddress", - "winlog.event_data.IpPort", - "winlog.event_data.KeyLength", - "winlog.event_data.LastBootGood", - "winlog.event_data.LastShutdownGood", - "winlog.event_data.LmPackageName", - "winlog.event_data.LogonGuid", - "winlog.event_data.LogonId", - "winlog.event_data.LogonProcessName", - "winlog.event_data.LogonType", - "winlog.event_data.MajorVersion", - "winlog.event_data.MaximumPerformancePercent", - "winlog.event_data.MemberName", - "winlog.event_data.MemberSid", - "winlog.event_data.MinimumPerformancePercent", - "winlog.event_data.MinimumThrottlePercent", - "winlog.event_data.MinorVersion", - "winlog.event_data.NewProcessId", - "winlog.event_data.NewProcessName", - "winlog.event_data.NewSchemeGuid", - "winlog.event_data.NewTime", - "winlog.event_data.NominalFrequency", - "winlog.event_data.Number", - "winlog.event_data.OldSchemeGuid", - "winlog.event_data.OldTime", - "winlog.event_data.OriginalFileName", - "winlog.event_data.Path", - "winlog.event_data.PerformanceImplementation", - "winlog.event_data.PreviousCreationUtcTime", - "winlog.event_data.PreviousTime", - "winlog.event_data.PrivilegeList", - "winlog.event_data.ProcessId", - "winlog.event_data.ProcessName", - "winlog.event_data.ProcessPath", - "winlog.event_data.ProcessPid", - "winlog.event_data.Product", - "winlog.event_data.PuaCount", - "winlog.event_data.PuaPolicyId", - "winlog.event_data.QfeVersion", - "winlog.event_data.Reason", - "winlog.event_data.SchemaVersion", - "winlog.event_data.ScriptBlockText", - "winlog.event_data.ServiceName", - "winlog.event_data.ServiceVersion", - 
"winlog.event_data.ShutdownActionType", - "winlog.event_data.ShutdownEventCode", - "winlog.event_data.ShutdownReason", - "winlog.event_data.Signature", - "winlog.event_data.SignatureStatus", - "winlog.event_data.Signed", - "winlog.event_data.StartTime", - "winlog.event_data.State", - "winlog.event_data.Status", - "winlog.event_data.StopTime", - "winlog.event_data.SubjectDomainName", - "winlog.event_data.SubjectLogonId", - "winlog.event_data.SubjectUserName", - "winlog.event_data.SubjectUserSid", - "winlog.event_data.TSId", - "winlog.event_data.TargetDomainName", - "winlog.event_data.TargetInfo", - "winlog.event_data.TargetLogonGuid", - "winlog.event_data.TargetLogonId", - "winlog.event_data.TargetServerName", - "winlog.event_data.TargetUserName", - "winlog.event_data.TargetUserSid", - "winlog.event_data.TerminalSessionId", - "winlog.event_data.TokenElevationType", - "winlog.event_data.TransmittedServices", - "winlog.event_data.UserSid", - "winlog.event_data.Version", - "winlog.event_data.Workstation", - "winlog.event_data.param1", - "winlog.event_data.param2", - "winlog.event_data.param3", - "winlog.event_data.param4", - "winlog.event_data.param5", - "winlog.event_data.param6", - "winlog.event_data.param7", - "winlog.event_data.param8", - "winlog.event_id", - "winlog.keywords", - "winlog.channel", - "winlog.record_id", - "winlog.related_activity_id", - "winlog.opcode", - "winlog.provider_guid", - "winlog.provider_name", - "winlog.task", - "winlog.user.identifier", - "winlog.user.name", - "winlog.user.domain", - "winlog.user.type" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - }, - { - "winlog.user_data": { - "path_match": "winlog.user_data.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "container": { - "properties": { - "image": { - "properties": { - 
"name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "args": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "pid": { - "type": "long" - }, - "args_count": { - "type": "long" - }, - "entity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "title": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "command_line": { - "ignore_above": 1024, - "type": "wildcard", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "executable": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - } - } - }, - "winlog": { - "properties": { - "related_activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "computer_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "process": { - "properties": { - "pid": { - "type": "long" - }, - "thread": { - "properties": { - "id": { - "type": "long" - } - } - } - } - }, - "keywords": { - "ignore_above": 1024, - "type": "keyword" - }, - "channel": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_data": { - "properties": { - "SignatureStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "OriginalFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "Product": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"FileVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "StopTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Status": { - "ignore_above": 1024, - "type": "keyword" - }, - "CorruptionActionState": { - "ignore_above": 1024, - "type": "keyword" - }, - "KeyLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousCreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "PerformanceImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Group": { - "ignore_above": 1024, - "type": "keyword" - }, - "Description": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownActionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "DwordVal": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMajor": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptBlockText": { - "ignore_above": 1024, - "type": "keyword" - }, - "TransmittedServices": { - "ignore_above": 1024, - "type": "keyword" - }, - "MaximumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "FinalStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleStateCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "MajorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "Path": { - "ignore_above": 1024, - "type": "keyword" - }, - "SchemaVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "TokenElevationType": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "IdleImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "QfeVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMinor": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Company": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaPolicyId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IntegrityLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastShutdownGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpPort": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "LmPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastBootGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Version": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signed": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownEventCode": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "State": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Binary": { - "ignore_above": 1024, - "type": "keyword" - }, - "ImpersonationLevel": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "MemberName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Detail": { - "ignore_above": 1024, - "type": "keyword" - }, - "TerminalSessionId": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "CreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetServerName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Number": { - "ignore_above": 1024, - "type": "keyword" - }, - "BuildVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TSId": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PrivilegeList": { - "ignore_above": 1024, - "type": "keyword" - }, - "param7": { - "ignore_above": 1024, - "type": "keyword" - }, - "param8": { - "ignore_above": 1024, - "type": "keyword" - }, - "param5": { - "ignore_above": 1024, - "type": "keyword" - }, - "param6": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriveName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ExtraInfo": { - "ignore_above": 1024, - "type": "keyword" - 
}, - "param3": { - "ignore_above": 1024, - "type": "keyword" - }, - "param4": { - "ignore_above": 1024, - "type": "keyword" - }, - "param1": { - "ignore_above": 1024, - "type": "keyword" - }, - "param2": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "Workstation": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signature": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumThrottlePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "EntryCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "BitlockerUserInputTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuthenticationPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NominalFrequency": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "opcode": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "type": "long" - }, - "record_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "task": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_guid": { - "ignore_above": 1024, - "type": "keyword" - }, - "activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "api": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "user": { - "properties": { - "identifier": { - "ignore_above": 1024, - "type": "keyword" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "log": { - 
"properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "destination": { - "properties": { - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "source": { - "properties": { - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "message": { - "type": "match_only_text" - }, - "tags": { - "ignore_above": 1024, - "type": "keyword" - }, - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "input": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "file": { - "properties": { - "path": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "extension": { - "ignore_above": 1024, - "type": 
"keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "directory": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "related": { - "properties": { - "hosts": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "user": { - "ignore_above": 1024, - "type": "keyword" - }, - "hash": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "powershell": { - "properties": { - "sequence": { - "type": "long" - }, - "total": { - "type": "long" - }, - "connected_user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "executable_version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "file": { - "properties": { - "script_block_text": { - "search_analyzer": "powershell_script_analyzer", - "analyzer": "powershell_script_analyzer", - "type": "text" - }, - "script_block_id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "engine": { - "properties": { - "previous_state": { - "ignore_above": 1024, - "type": "keyword" - }, - "new_state": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "new_state": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "runspace_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "pipeline_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "command": { - "properties": { - "path": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "value": { - "type": "text" - } - } - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "sequence": { - "type": "long" - }, - "ingested": { - "type": "date" - }, - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "created": { - "type": "date" - }, - "kind": { - "ignore_above": 1024, - "type": "keyword" - }, - "module": { - "type": "constant_keyword", - "value": "windows" - }, - "action": { - "ignore_above": 1024, - "type": "keyword" - }, - "category": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "dataset": { - "type": "constant_keyword", - "value": "windows.powershell" - }, - "outcome": { - 
"ignore_above": 1024, - "type": "keyword" - } - } - }, - "dataset": { - "properties": { - "name": { - "type": "constant_keyword" - }, - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - } - } - }, - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - }, - "_meta": { - "package": { - "name": "windows" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell_operational@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell_operational@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell_operational@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell_operational@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell_operational@package.json deleted file mode 100644 index b5cc588c9..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.powershell_operational@package.json +++ /dev/null @@ -1,1334 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-windows.powershell_operational-1.20.1", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "analysis": { - "analyzer": { - "powershell_script_analyzer": { - "pattern": "[\\W&&[^-]]+", - "type": "pattern" - } - } - }, - "query": { - 
"default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "event.action", - "event.category", - "event.code", - "event.kind", - "event.outcome", - "event.provider", - "event.type", - "tags", - "input.type", - "destination.user.domain", - "destination.user.id", - "destination.user.name", - "ecs.version", - "file.directory", - "file.extension", - "file.name", - "file.path", - "log.level", - "message", - "process.args", - "process.command_line", - "process.entity_id", - "process.executable", - "process.name", - "process.title", - "related.hash", - "related.hosts", - "related.user", - "source.user.domain", - "source.user.id", - "source.user.name", - "user.domain", - "user.id", - "user.name", - "powershell.id", - "powershell.pipeline_id", - "powershell.runspace_id", - "powershell.command.path", - "powershell.command.name", - "powershell.command.type", - "powershell.command.value", - "powershell.connected_user.domain", - "powershell.connected_user.name", - "powershell.engine.version", - "powershell.engine.previous_state", - "powershell.engine.new_state", - "powershell.file.script_block_id", - "powershell.file.script_block_text", - "powershell.process.executable_version", - "powershell.provider.new_state", - "powershell.provider.name", - "winlog.api", - "winlog.activity_id", - "winlog.computer_name", - "winlog.event_data.AuthenticationPackageName", - "winlog.event_data.Binary", - "winlog.event_data.BitlockerUserInputTime", - "winlog.event_data.BootMode", - "winlog.event_data.BootType", - 
"winlog.event_data.BuildVersion", - "winlog.event_data.Company", - "winlog.event_data.CorruptionActionState", - "winlog.event_data.CreationUtcTime", - "winlog.event_data.Description", - "winlog.event_data.Detail", - "winlog.event_data.DeviceName", - "winlog.event_data.DeviceNameLength", - "winlog.event_data.DeviceTime", - "winlog.event_data.DeviceVersionMajor", - "winlog.event_data.DeviceVersionMinor", - "winlog.event_data.DriveName", - "winlog.event_data.DriverName", - "winlog.event_data.DriverNameLength", - "winlog.event_data.DwordVal", - "winlog.event_data.EntryCount", - "winlog.event_data.ExtraInfo", - "winlog.event_data.FailureName", - "winlog.event_data.FailureNameLength", - "winlog.event_data.FileVersion", - "winlog.event_data.FinalStatus", - "winlog.event_data.Group", - "winlog.event_data.IdleImplementation", - "winlog.event_data.IdleStateCount", - "winlog.event_data.ImpersonationLevel", - "winlog.event_data.IntegrityLevel", - "winlog.event_data.IpAddress", - "winlog.event_data.IpPort", - "winlog.event_data.KeyLength", - "winlog.event_data.LastBootGood", - "winlog.event_data.LastShutdownGood", - "winlog.event_data.LmPackageName", - "winlog.event_data.LogonGuid", - "winlog.event_data.LogonId", - "winlog.event_data.LogonProcessName", - "winlog.event_data.LogonType", - "winlog.event_data.MajorVersion", - "winlog.event_data.MaximumPerformancePercent", - "winlog.event_data.MemberName", - "winlog.event_data.MemberSid", - "winlog.event_data.MinimumPerformancePercent", - "winlog.event_data.MinimumThrottlePercent", - "winlog.event_data.MinorVersion", - "winlog.event_data.NewProcessId", - "winlog.event_data.NewProcessName", - "winlog.event_data.NewSchemeGuid", - "winlog.event_data.NewTime", - "winlog.event_data.NominalFrequency", - "winlog.event_data.Number", - "winlog.event_data.OldSchemeGuid", - "winlog.event_data.OldTime", - "winlog.event_data.OriginalFileName", - "winlog.event_data.Path", - "winlog.event_data.PerformanceImplementation", - 
"winlog.event_data.PreviousCreationUtcTime", - "winlog.event_data.PreviousTime", - "winlog.event_data.PrivilegeList", - "winlog.event_data.ProcessId", - "winlog.event_data.ProcessName", - "winlog.event_data.ProcessPath", - "winlog.event_data.ProcessPid", - "winlog.event_data.Product", - "winlog.event_data.PuaCount", - "winlog.event_data.PuaPolicyId", - "winlog.event_data.QfeVersion", - "winlog.event_data.Reason", - "winlog.event_data.SchemaVersion", - "winlog.event_data.ScriptBlockText", - "winlog.event_data.ServiceName", - "winlog.event_data.ServiceVersion", - "winlog.event_data.ShutdownActionType", - "winlog.event_data.ShutdownEventCode", - "winlog.event_data.ShutdownReason", - "winlog.event_data.Signature", - "winlog.event_data.SignatureStatus", - "winlog.event_data.Signed", - "winlog.event_data.StartTime", - "winlog.event_data.State", - "winlog.event_data.Status", - "winlog.event_data.StopTime", - "winlog.event_data.SubjectDomainName", - "winlog.event_data.SubjectLogonId", - "winlog.event_data.SubjectUserName", - "winlog.event_data.SubjectUserSid", - "winlog.event_data.TSId", - "winlog.event_data.TargetDomainName", - "winlog.event_data.TargetInfo", - "winlog.event_data.TargetLogonGuid", - "winlog.event_data.TargetLogonId", - "winlog.event_data.TargetServerName", - "winlog.event_data.TargetUserName", - "winlog.event_data.TargetUserSid", - "winlog.event_data.TerminalSessionId", - "winlog.event_data.TokenElevationType", - "winlog.event_data.TransmittedServices", - "winlog.event_data.UserSid", - "winlog.event_data.Version", - "winlog.event_data.Workstation", - "winlog.event_data.param1", - "winlog.event_data.param2", - "winlog.event_data.param3", - "winlog.event_data.param4", - "winlog.event_data.param5", - "winlog.event_data.param6", - "winlog.event_data.param7", - "winlog.event_data.param8", - "winlog.event_id", - "winlog.keywords", - "winlog.channel", - "winlog.record_id", - "winlog.related_activity_id", - "winlog.opcode", - "winlog.provider_guid", - 
"winlog.provider_name", - "winlog.task", - "winlog.user.identifier", - "winlog.user.name", - "winlog.user.domain", - "winlog.user.type" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - }, - { - "winlog.user_data": { - "path_match": "winlog.user_data.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "args": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "pid": { - "type": "long" - }, - "args_count": { - "type": "long" - }, - "entity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "title": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "command_line": { - "ignore_above": 1024, - "type": "wildcard", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "executable": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - } - } - }, - "winlog": { - "properties": { - "related_activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "computer_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "process": { - "properties": { - "pid": { - "type": "long" - }, - "thread": { - "properties": { - "id": { - "type": "long" - } - } - } - } - }, - "keywords": { - "ignore_above": 1024, - "type": "keyword" - }, - "channel": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_data": { 
- "properties": { - "SignatureStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "OriginalFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "Product": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "FileVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "StopTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Status": { - "ignore_above": 1024, - "type": "keyword" - }, - "CorruptionActionState": { - "ignore_above": 1024, - "type": "keyword" - }, - "KeyLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousCreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "PerformanceImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Group": { - "ignore_above": 1024, - "type": "keyword" - }, - "Description": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownActionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "DwordVal": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMajor": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptBlockText": { - "ignore_above": 1024, - "type": "keyword" - }, - "TransmittedServices": { - "ignore_above": 1024, - "type": "keyword" - }, - "MaximumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"NewTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "FinalStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleStateCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "MajorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "Path": { - "ignore_above": 1024, - "type": "keyword" - }, - "SchemaVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "TokenElevationType": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "QfeVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMinor": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Company": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaPolicyId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IntegrityLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastShutdownGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpPort": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "LmPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastBootGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Version": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signed": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownEventCode": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "NewProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "State": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Binary": { - "ignore_above": 1024, - "type": "keyword" - }, - "ImpersonationLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Detail": { - "ignore_above": 1024, - "type": "keyword" - }, - "TerminalSessionId": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "CreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetServerName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Number": { - "ignore_above": 1024, - "type": "keyword" - }, - "BuildVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TSId": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"PrivilegeList": { - "ignore_above": 1024, - "type": "keyword" - }, - "param7": { - "ignore_above": 1024, - "type": "keyword" - }, - "param8": { - "ignore_above": 1024, - "type": "keyword" - }, - "param5": { - "ignore_above": 1024, - "type": "keyword" - }, - "param6": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriveName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ExtraInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "param3": { - "ignore_above": 1024, - "type": "keyword" - }, - "param4": { - "ignore_above": 1024, - "type": "keyword" - }, - "param1": { - "ignore_above": 1024, - "type": "keyword" - }, - "param2": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "Workstation": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signature": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumThrottlePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "EntryCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "BitlockerUserInputTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuthenticationPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NominalFrequency": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "opcode": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "type": "long" - }, - "record_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "task": { - "ignore_above": 1024, - "type": "keyword" - }, 
- "provider_guid": { - "ignore_above": 1024, - "type": "keyword" - }, - "activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "api": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "user": { - "properties": { - "identifier": { - "ignore_above": 1024, - "type": "keyword" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "destination": { - "properties": { - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "source": { - "properties": { - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "message": { - "type": "match_only_text" - }, - "tags": { - "ignore_above": 1024, - "type": "keyword" - }, - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": 
{ - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "input": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "file": { - "properties": { - "path": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "extension": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "directory": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "related": { - "properties": { - "hosts": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "user": { - "ignore_above": 1024, - "type": "keyword" - }, - "hash": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "powershell": { - "properties": { - "sequence": { - "type": "long" - }, - "total": { - "type": "long" - }, - "connected_user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "executable_version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "file": { - "properties": { - "script_block_text": { - "analyzer": "powershell_script_analyzer", - "type": "text" - }, - "script_block_id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "engine": { - "properties": { - "previous_state": { - "ignore_above": 1024, - "type": "keyword" - }, - "new_state": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "new_state": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "runspace_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "pipeline_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "command": { - "properties": { - "path": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "value": { - "type": "text" - } - } - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "sequence": { - "type": "long" - }, - "ingested": { - "type": "date" - }, - "code": { - "ignore_above": 1024, - "type": "keyword" - 
}, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "created": { - "type": "date" - }, - "kind": { - "ignore_above": 1024, - "type": "keyword" - }, - "module": { - "type": "constant_keyword", - "value": "windows" - }, - "action": { - "ignore_above": 1024, - "type": "keyword" - }, - "category": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "dataset": { - "type": "constant_keyword", - "value": "windows.powershell_operational" - }, - "outcome": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "dataset": { - "properties": { - "name": { - "type": "constant_keyword" - }, - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - } - } - }, - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - }, - "_meta": { - "package": { - "name": "windows" - }, - "managed_by": "fleet", - "managed": true - } - } diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.sysmon_operational@custom.json b/salt/elasticsearch/templates/component/elastic-agent/logs-windows.sysmon_operational@custom.json deleted file mode 100644 index fe77af1db..000000000 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.sysmon_operational@custom.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "template": { - "settings": {} - }, - "_meta": { - "package": { - "name": "elastic_agent" - }, - "managed_by": "fleet", - "managed": true - } -} diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.sysmon_operational@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-windows.sysmon_operational@package.json deleted file mode 100644 index 451eaf7aa..000000000 --- 
a/salt/elasticsearch/templates/component/elastic-agent/logs-windows.sysmon_operational@package.json +++ /dev/null @@ -1,1752 +0,0 @@ - {"template": { - "settings": { - "index": { - "lifecycle": { - "name": "logs" - }, - "codec": "best_compression", - "default_pipeline": "logs-windows.sysmon_operational-1.20.1", - "mapping": { - "total_fields": { - "limit": "10000" - } - }, - "query": { - "default_field": [ - "cloud.account.id", - "cloud.availability_zone", - "cloud.instance.id", - "cloud.instance.name", - "cloud.machine.type", - "cloud.provider", - "cloud.region", - "cloud.project.id", - "cloud.image.id", - "container.id", - "container.image.name", - "container.name", - "host.architecture", - "host.hostname", - "host.id", - "host.mac", - "host.name", - "host.os.family", - "host.os.kernel", - "host.os.name", - "host.os.platform", - "host.os.version", - "host.os.build", - "host.os.codename", - "host.type", - "event.action", - "event.category", - "event.code", - "event.kind", - "event.outcome", - "event.provider", - "event.type", - "tags", - "input.type", - "destination.domain", - "dns.answers.class", - "dns.answers.data", - "dns.answers.name", - "dns.answers.type", - "dns.header_flags", - "dns.id", - "dns.op_code", - "dns.question.class", - "dns.question.name", - "dns.question.registered_domain", - "dns.question.subdomain", - "dns.question.top_level_domain", - "dns.question.type", - "dns.response_code", - "dns.type", - "ecs.version", - "error.code", - "error.message", - "file.code_signature.status", - "file.code_signature.subject_name", - "file.directory", - "file.extension", - "file.hash.md5", - "file.hash.sha1", - "file.hash.sha256", - "file.hash.sha512", - "file.name", - "file.path", - "file.pe.architecture", - "file.pe.company", - "file.pe.description", - "file.pe.file_version", - "file.pe.imphash", - "file.pe.original_file_name", - "file.pe.product", - "group.domain", - "group.id", - "group.name", - "log.level", - "message", - "network.community_id", - 
"network.direction", - "network.protocol", - "network.transport", - "network.type", - "process.args", - "process.command_line", - "process.entity_id", - "process.executable", - "process.hash.md5", - "process.hash.sha1", - "process.hash.sha256", - "process.hash.sha512", - "process.name", - "process.parent.args", - "process.parent.command_line", - "process.parent.entity_id", - "process.parent.executable", - "process.parent.name", - "process.pe.architecture", - "process.pe.company", - "process.pe.description", - "process.pe.file_version", - "process.pe.imphash", - "process.pe.original_file_name", - "process.pe.product", - "process.title", - "process.working_directory", - "registry.data.strings", - "registry.data.type", - "registry.hive", - "registry.key", - "registry.path", - "registry.value", - "related.hash", - "related.hosts", - "related.user", - "rule.name", - "service.name", - "service.type", - "source.domain", - "user.domain", - "user.id", - "user.name", - "user.target.group.domain", - "user.target.group.id", - "user.target.group.name", - "user.target.name", - "sysmon.dns.status", - "winlog.api", - "winlog.activity_id", - "winlog.computer_name", - "winlog.event_data.AuthenticationPackageName", - "winlog.event_data.Binary", - "winlog.event_data.BitlockerUserInputTime", - "winlog.event_data.BootMode", - "winlog.event_data.BootType", - "winlog.event_data.BuildVersion", - "winlog.event_data.CallTrace", - "winlog.event_data.ClientInfo", - "winlog.event_data.Company", - "winlog.event_data.Configuration", - "winlog.event_data.CorruptionActionState", - "winlog.event_data.CreationUtcTime", - "winlog.event_data.Description", - "winlog.event_data.Detail", - "winlog.event_data.DeviceName", - "winlog.event_data.DeviceNameLength", - "winlog.event_data.DeviceTime", - "winlog.event_data.DeviceVersionMajor", - "winlog.event_data.DeviceVersionMinor", - "winlog.event_data.DriveName", - "winlog.event_data.DriverName", - "winlog.event_data.DriverNameLength", - 
"winlog.event_data.DwordVal", - "winlog.event_data.EntryCount", - "winlog.event_data.EventType", - "winlog.event_data.EventNamespace", - "winlog.event_data.ExtraInfo", - "winlog.event_data.FailureName", - "winlog.event_data.FailureNameLength", - "winlog.event_data.FileVersion", - "winlog.event_data.FinalStatus", - "winlog.event_data.GrantedAccess", - "winlog.event_data.Group", - "winlog.event_data.IdleImplementation", - "winlog.event_data.IdleStateCount", - "winlog.event_data.ImpersonationLevel", - "winlog.event_data.IntegrityLevel", - "winlog.event_data.IpAddress", - "winlog.event_data.IpPort", - "winlog.event_data.KeyLength", - "winlog.event_data.LastBootGood", - "winlog.event_data.LastShutdownGood", - "winlog.event_data.LmPackageName", - "winlog.event_data.LogonGuid", - "winlog.event_data.LogonId", - "winlog.event_data.LogonProcessName", - "winlog.event_data.LogonType", - "winlog.event_data.MajorVersion", - "winlog.event_data.MaximumPerformancePercent", - "winlog.event_data.MemberName", - "winlog.event_data.MemberSid", - "winlog.event_data.MinimumPerformancePercent", - "winlog.event_data.MinimumThrottlePercent", - "winlog.event_data.MinorVersion", - "winlog.event_data.Name", - "winlog.event_data.NewProcessId", - "winlog.event_data.NewProcessName", - "winlog.event_data.NewSchemeGuid", - "winlog.event_data.NewThreadId", - "winlog.event_data.NewTime", - "winlog.event_data.NominalFrequency", - "winlog.event_data.Number", - "winlog.event_data.OldSchemeGuid", - "winlog.event_data.OldTime", - "winlog.event_data.Operation", - "winlog.event_data.OriginalFileName", - "winlog.event_data.Path", - "winlog.event_data.PerformanceImplementation", - "winlog.event_data.PreviousCreationUtcTime", - "winlog.event_data.PreviousTime", - "winlog.event_data.PrivilegeList", - "winlog.event_data.ProcessId", - "winlog.event_data.ProcessName", - "winlog.event_data.ProcessPath", - "winlog.event_data.ProcessPid", - "winlog.event_data.Product", - "winlog.event_data.PuaCount", - 
"winlog.event_data.PuaPolicyId", - "winlog.event_data.QfeVersion", - "winlog.event_data.Query", - "winlog.event_data.Reason", - "winlog.event_data.SchemaVersion", - "winlog.event_data.ScriptBlockText", - "winlog.event_data.ServiceName", - "winlog.event_data.ServiceVersion", - "winlog.event_data.Session", - "winlog.event_data.ShutdownActionType", - "winlog.event_data.ShutdownEventCode", - "winlog.event_data.ShutdownReason", - "winlog.event_data.Signature", - "winlog.event_data.SignatureStatus", - "winlog.event_data.Signed", - "winlog.event_data.StartAddress", - "winlog.event_data.StartFunction", - "winlog.event_data.StartModule", - "winlog.event_data.StartTime", - "winlog.event_data.State", - "winlog.event_data.Status", - "winlog.event_data.StopTime", - "winlog.event_data.SubjectDomainName", - "winlog.event_data.SubjectLogonId", - "winlog.event_data.SubjectUserName", - "winlog.event_data.SubjectUserSid", - "winlog.event_data.TSId", - "winlog.event_data.TargetDomainName", - "winlog.event_data.TargetImage", - "winlog.event_data.TargetInfo", - "winlog.event_data.TargetLogonGuid", - "winlog.event_data.TargetLogonId", - "winlog.event_data.TargetProcessGUID", - "winlog.event_data.TargetProcessId", - "winlog.event_data.TargetServerName", - "winlog.event_data.TargetUserName", - "winlog.event_data.TargetUserSid", - "winlog.event_data.TerminalSessionId", - "winlog.event_data.TokenElevationType", - "winlog.event_data.TransmittedServices", - "winlog.event_data.Type", - "winlog.event_data.UserSid", - "winlog.event_data.Version", - "winlog.event_data.Workstation", - "winlog.event_data.param1", - "winlog.event_data.param2", - "winlog.event_data.param3", - "winlog.event_data.param4", - "winlog.event_data.param5", - "winlog.event_data.param6", - "winlog.event_data.param7", - "winlog.event_data.param8", - "winlog.event_id", - "winlog.keywords", - "winlog.channel", - "winlog.record_id", - "winlog.related_activity_id", - "winlog.opcode", - "winlog.provider_guid", - 
"winlog.provider_name", - "winlog.task", - "winlog.user.identifier", - "winlog.user.name", - "winlog.user.domain", - "winlog.user.type" - ] - } - } - }, - "mappings": { - "dynamic_templates": [ - { - "container.labels": { - "path_match": "container.labels.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - }, - { - "winlog.user_data": { - "path_match": "winlog.user_data.*", - "mapping": { - "type": "keyword" - }, - "match_mapping_type": "string" - } - } - ], - "properties": { - "container": { - "properties": { - "image": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "sysmon": { - "properties": { - "file": { - "properties": { - "archived": { - "type": "boolean" - }, - "is_executable": { - "type": "boolean" - } - } - }, - "dns": { - "properties": { - "status": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "log": { - "properties": { - "level": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "destination": { - "properties": { - "port": { - "type": "long" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - } - } - }, - "rule": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "source": { - "properties": { - "port": { - "type": "long" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - } - } - }, - "error": { - "properties": { - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "message": { - "type": "match_only_text" - } - } - }, - "network": { - "properties": { - "community_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "protocol": { - "ignore_above": 1024, - "type": "keyword" - }, - "transport": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "direction": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "cloud": { - "properties": { - "availability_zone": { - "ignore_above": 1024, - "type": "keyword" - }, - "image": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "instance": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "provider": { - "ignore_above": 1024, - "type": "keyword" - }, - "machine": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "project": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "region": { - "ignore_above": 1024, - "type": "keyword" - }, - "account": { - "properties": { - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "file": { - "properties": { - "path": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "extension": { - "ignore_above": 1024, - "type": "keyword" - }, - "code_signature": { - "properties": { - "valid": { - "type": "boolean" - }, - "trusted": { - "type": "boolean" - }, - "subject_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "exists": { - "type": "boolean" - }, - "status": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "pe": { - "properties": { - "file_version": { - "ignore_above": 1024, - "type": "keyword" - }, - "product": { - "ignore_above": 1024, - "type": "keyword" - }, - "imphash": { - "ignore_above": 1024, - "type": "keyword" - }, - "description": { - "ignore_above": 1024, - "type": "keyword" - }, - "company": { - "ignore_above": 1024, - "type": "keyword" - }, - "original_file_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"directory": { - "ignore_above": 1024, - "type": "keyword" - }, - "hash": { - "properties": { - "sha1": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha256": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha512": { - "ignore_above": 1024, - "type": "keyword" - }, - "md5": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "ecs": { - "properties": { - "version": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "related": { - "properties": { - "hosts": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "user": { - "ignore_above": 1024, - "type": "keyword" - }, - "hash": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "host": { - "properties": { - "hostname": { - "ignore_above": 1024, - "type": "keyword" - }, - "os": { - "properties": { - "build": { - "ignore_above": 1024, - "type": "keyword" - }, - "kernel": { - "ignore_above": 1024, - "type": "keyword" - }, - "codename": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "text" - } - } - }, - "family": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "ignore_above": 1024, - "type": "keyword" - }, - "platform": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "ip": { - "type": "ip" - }, - "containerized": { - "type": "boolean" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "mac": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "event": { - "properties": { - "sequence": { - "type": "long" - }, - "ingested": { - "type": "date" - }, - "code": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider": { - "ignore_above": 
1024, - "type": "keyword" - }, - "created": { - "type": "date" - }, - "kind": { - "ignore_above": 1024, - "type": "keyword" - }, - "module": { - "type": "constant_keyword", - "value": "windows" - }, - "action": { - "ignore_above": 1024, - "type": "keyword" - }, - "category": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "dataset": { - "type": "constant_keyword", - "value": "windows.sysmon_operational" - }, - "outcome": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "group": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "registry": { - "properties": { - "hive": { - "ignore_above": 1024, - "type": "keyword" - }, - "path": { - "ignore_above": 1024, - "type": "keyword" - }, - "data": { - "properties": { - "strings": { - "ignore_above": 1024, - "type": "wildcard" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "value": { - "ignore_above": 1024, - "type": "keyword" - }, - "key": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "process": { - "properties": { - "args": { - "ignore_above": 1024, - "type": "keyword" - }, - "parent": { - "properties": { - "args": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "pid": { - "type": "long" - }, - "args_count": { - "type": "long" - }, - "entity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "command_line": { - "ignore_above": 1024, - "type": "wildcard", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "executable": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - } - } - }, - "pe": { - "properties": { - "file_version": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "product": { - "ignore_above": 1024, - "type": "keyword" - }, - "imphash": { - "ignore_above": 1024, - "type": "keyword" - }, - "description": { - "ignore_above": 1024, - "type": "keyword" - }, - "company": { - "ignore_above": 1024, - "type": "keyword" - }, - "original_file_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "architecture": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "pid": { - "type": "long" - }, - "working_directory": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "args_count": { - "type": "long" - }, - "entity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "title": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "command_line": { - "ignore_above": 1024, - "type": "wildcard", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "executable": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "hash": { - "properties": { - "sha1": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha256": { - "ignore_above": 1024, - "type": "keyword" - }, - "sha512": { - "ignore_above": 1024, - "type": "keyword" - }, - "md5": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "winlog": { - "properties": { - "related_activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "computer_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "process": { - "properties": { - "pid": { - "type": "long" - }, - "thread": { - "properties": { - "id": { - "type": "long" - } - } - } - } - }, - "keywords": { - "ignore_above": 1024, - "type": "keyword" - }, - "channel": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"event_data": { - "properties": { - "SignatureStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Configuration": { - "ignore_above": 1024, - "type": "keyword" - }, - "OriginalFileName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Query": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootMode": { - "ignore_above": 1024, - "type": "keyword" - }, - "Product": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetLogonGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "FileVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "CallTrace": { - "ignore_above": 1024, - "type": "keyword" - }, - "StopTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Status": { - "ignore_above": 1024, - "type": "keyword" - }, - "GrantedAccess": { - "ignore_above": 1024, - "type": "keyword" - }, - "CorruptionActionState": { - "ignore_above": 1024, - "type": "keyword" - }, - "KeyLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousCreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "PerformanceImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Group": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewThreadId": { - "ignore_above": 1024, - "type": "keyword" - }, - "Description": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownActionType": { - "ignore_above": 1024, - "type": "keyword" - }, - "DwordVal": { - 
"ignore_above": 1024, - "type": "keyword" - }, - "ProcessPid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMajor": { - "ignore_above": 1024, - "type": "keyword" - }, - "ScriptBlockText": { - "ignore_above": 1024, - "type": "keyword" - }, - "TransmittedServices": { - "ignore_above": 1024, - "type": "keyword" - }, - "MaximumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "FinalStatus": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleStateCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "MajorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "Path": { - "ignore_above": 1024, - "type": "keyword" - }, - "SchemaVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "TokenElevationType": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinorVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "IdleImplementation": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessPath": { - "ignore_above": 1024, - "type": "keyword" - }, - "QfeVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceVersionMinor": { - "ignore_above": 1024, - "type": "keyword" - }, - "Type": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpAddress": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Company": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaPolicyId": { - "ignore_above": 1024, - "type": "keyword" - }, - "EventType": { - "ignore_above": 1024, - "type": "keyword" - }, - "IntegrityLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastShutdownGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "IpPort": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"DriverNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "LmPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Name": { - "ignore_above": 1024, - "type": "keyword" - }, - "UserSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "LastBootGood": { - "ignore_above": 1024, - "type": "keyword" - }, - "PuaCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "Version": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetProcessGUID": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signed": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownEventCode": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "ServiceName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PreviousTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "State": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartFunction": { - "ignore_above": 1024, - "type": "keyword" - }, - "BootType": { - "ignore_above": 1024, - "type": "keyword" - }, - "Binary": { - "ignore_above": 1024, - "type": "keyword" - }, - "ClientInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "ImpersonationLevel": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Detail": { - "ignore_above": 1024, - "type": "keyword" - }, - "TerminalSessionId": { - "ignore_above": 1024, - "type": "keyword" - }, - "MemberSid": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriverName": { - "ignore_above": 1024, - "type": "keyword" - }, - "DeviceNameLength": { - "ignore_above": 1024, - "type": "keyword" - }, - "OldSchemeGuid": { 
- "ignore_above": 1024, - "type": "keyword" - }, - "Operation": { - "ignore_above": 1024, - "type": "keyword" - }, - "CreationUtcTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "Reason": { - "ignore_above": 1024, - "type": "keyword" - }, - "ShutdownReason": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetServerName": { - "ignore_above": 1024, - "type": "keyword" - }, - "Number": { - "ignore_above": 1024, - "type": "keyword" - }, - "BuildVersion": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetImage": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumPerformancePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonProcessName": { - "ignore_above": 1024, - "type": "keyword" - }, - "TSId": { - "ignore_above": 1024, - "type": "keyword" - }, - "TargetDomainName": { - "ignore_above": 1024, - "type": "keyword" - }, - "PrivilegeList": { - "ignore_above": 1024, - "type": "keyword" - }, - "param7": { - "ignore_above": 1024, - "type": "keyword" - }, - "param8": { - "ignore_above": 1024, - "type": "keyword" - }, - "param5": { - "ignore_above": 1024, - "type": "keyword" - }, - "param6": { - "ignore_above": 1024, - "type": "keyword" - }, - "DriveName": { - "ignore_above": 1024, - "type": "keyword" - }, - "EventNamespace": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "LogonType": { - "ignore_above": 1024, - "type": "keyword" - }, - "ExtraInfo": { - "ignore_above": 1024, - "type": "keyword" - }, - "StartModule": { - "ignore_above": 1024, - "type": "keyword" - }, - "param3": { - "ignore_above": 1024, - "type": "keyword" - }, - "param4": { - "ignore_above": 1024, - "type": "keyword" - }, - "param1": { - "ignore_above": 1024, - "type": "keyword" - }, - "param2": { - "ignore_above": 1024, - "type": "keyword" - 
}, - "TargetLogonId": { - "ignore_above": 1024, - "type": "keyword" - }, - "Workstation": { - "ignore_above": 1024, - "type": "keyword" - }, - "SubjectUserName": { - "ignore_above": 1024, - "type": "keyword" - }, - "FailureName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NewSchemeGuid": { - "ignore_above": 1024, - "type": "keyword" - }, - "Signature": { - "ignore_above": 1024, - "type": "keyword" - }, - "MinimumThrottlePercent": { - "ignore_above": 1024, - "type": "keyword" - }, - "ProcessId": { - "ignore_above": 1024, - "type": "keyword" - }, - "EntryCount": { - "ignore_above": 1024, - "type": "keyword" - }, - "BitlockerUserInputTime": { - "ignore_above": 1024, - "type": "keyword" - }, - "AuthenticationPackageName": { - "ignore_above": 1024, - "type": "keyword" - }, - "NominalFrequency": { - "ignore_above": 1024, - "type": "keyword" - }, - "Session": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "opcode": { - "ignore_above": 1024, - "type": "keyword" - }, - "version": { - "type": "long" - }, - "record_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "event_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "task": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_guid": { - "ignore_above": 1024, - "type": "keyword" - }, - "activity_id": { - "ignore_above": 1024, - "type": "keyword" - }, - "api": { - "ignore_above": 1024, - "type": "keyword" - }, - "provider_name": { - "ignore_above": 1024, - "type": "keyword" - }, - "user": { - "properties": { - "identifier": { - "ignore_above": 1024, - "type": "keyword" - }, - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - }, - "dns": { - "properties": { - "op_code": { - "ignore_above": 1024, - "type": "keyword" - }, - "resolved_ip": { - "type": "ip" - }, - "response_code": { - "ignore_above": 1024, - "type": "keyword" - }, - 
"question": { - "properties": { - "registered_domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "top_level_domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "subdomain": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "class": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "answers": { - "properties": { - "data": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - }, - "class": { - "ignore_above": 1024, - "type": "keyword" - }, - "ttl": { - "type": "long" - } - } - }, - "header_flags": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "message": { - "type": "match_only_text" - }, - "tags": { - "ignore_above": 1024, - "type": "keyword" - }, - "input": { - "properties": { - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "@timestamp": { - "type": "date" - }, - "data_stream": { - "properties": { - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - }, - "dataset": { - "type": "constant_keyword" - } - } - }, - "service": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "type": { - "ignore_above": 1024, - "type": "keyword" - } - } - }, - "dataset": { - "properties": { - "name": { - "type": "constant_keyword" - }, - "namespace": { - "type": "constant_keyword" - }, - "type": { - "type": "constant_keyword" - } - } - }, - "user": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "id": { - "ignore_above": 1024, - 
"type": "keyword" - }, - "target": { - "properties": { - "name": { - "ignore_above": 1024, - "type": "keyword", - "fields": { - "text": { - "type": "match_only_text" - } - } - }, - "group": { - "properties": { - "domain": { - "ignore_above": 1024, - "type": "keyword" - }, - "name": { - "ignore_above": 1024, - "type": "keyword" - }, - "id": { - "ignore_above": 1024, - "type": "keyword" - } - } - } - } - } - } - } - } - } - }, - "_meta": { - "package": { - "name": "windows" - }, - "managed_by": "fleet", - "managed": true - } - } From b66be9c22640b3809a86134d01cbd42edcf4917a Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 5 Sep 2023 12:46:49 -0400 Subject: [PATCH 077/417] only ingest pfsense on sensor nodes --- salt/common/tools/sbin/so-test | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-test b/salt/common/tools/sbin/so-test index 1758a44bb..01b4da637 100755 --- a/salt/common/tools/sbin/so-test +++ b/salt/common/tools/sbin/so-test @@ -5,10 +5,14 @@ # https://securityonion.net/license; you may not use this file except in compliance with the # Elastic License 2.0. +. 
/usr/sbin/so-common + set -e # Playback live sample data onto monitor interface so-tcpreplay /opt/samples/* 2> /dev/null # Ingest sample pfsense log entry -echo "<134>$(date '+%b %d %H:%M:%S') filterlog[31624]: 84,,,1567509287,igb0.244,match,pass,in,4,0x0,,64,0,0,DF,6,tcp,64,192.168.1.1,10.10.10.10,56320,443,0,S,3333585167,,65535,,mss;nop;wscale;nop;nop;TS;sackOK;eol" | nc -uv -w1 127.0.0.1 514 > /dev/null 2>&1 +if is_sensor_node; then + echo "<134>$(date '+%b %d %H:%M:%S') filterlog[31624]: 84,,,1567509287,igb0.244,match,pass,in,4,0x0,,64,0,0,DF,6,tcp,64,192.168.1.1,10.10.10.10,56320,443,0,S,3333585167,,65535,,mss;nop;wscale;nop;nop;TS;sackOK;eol" | nc -uv -w1 127.0.0.1 514 > /dev/null 2>&1 +fi From ffaab4a1b47d7949ab6ff061d97d0b59f95ad049 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 6 Sep 2023 14:19:53 -0400 Subject: [PATCH 078/417] only add endgame to action if it is populated --- salt/soc/merged.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index dc2f889bb..c17c23b25 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -35,7 +35,7 @@ {% endif %} {% set standard_actions = SOCMERGED.config.pop('actions') %} -{% if pillar.global.endgamehost is defined %} +{% if pillar.global.endgamehost != '' %} {% set endgame_dict = { "name": "Endgame", "description": "Endgame Endpoint Investigation and Response", From 60f1947eb4b4f5a6d2b5c43507164246a92e63cd Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 7 Sep 2023 14:01:19 -0400 Subject: [PATCH 079/417] prevent endgame_dict from being added to standard_actions if it is already present --- salt/soc/merged.map.jinja | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index c17c23b25..052ff9941 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -35,7 +35,17 @@ {% endif %} {% set standard_actions = 
SOCMERGED.config.pop('actions') %} + {% if pillar.global.endgamehost != '' %} +{# this is added to prevent endgame_dict from being added to standard_actions for each time this file is rendered #} +{% set endgame = namespace(add=true) %} +{% for d in standard_actions %} +{% if d.name is defined %} +{% if d.name == 'Endgame' %} +{% set endgame.add = false %} +{% endif %} +{% endif %} +{% endfor %} {% set endgame_dict = { "name": "Endgame", "description": "Endgame Endpoint Investigation and Response", @@ -44,7 +54,9 @@ "links": ["https://" ~ pillar.global.endgamehost ~ "/endpoints/{:agent.id}"] } %} -{% do standard_actions.append(endgame_dict) %} +{% if endgame.add %} +{% do standard_actions.append(endgame_dict) %} +{% endif %} {% endif %} {% do SOCMERGED.config.server.client.hunt.update({'actions': standard_actions}) %} From 35157f2e8b27c313235a4cbd95fa4e0bb77ea12f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 7 Sep 2023 15:46:04 -0400 Subject: [PATCH 080/417] add comment --- salt/soc/merged.map.jinja | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/soc/merged.map.jinja b/salt/soc/merged.map.jinja index 052ff9941..33c0070ad 100644 --- a/salt/soc/merged.map.jinja +++ b/salt/soc/merged.map.jinja @@ -38,6 +38,7 @@ {% if pillar.global.endgamehost != '' %} {# this is added to prevent endgame_dict from being added to standard_actions for each time this file is rendered #} +{# since this map file is rendered 3 times, it causes endgame_dict to appened 3 times if custom actions are defined in the pillar #} {% set endgame = namespace(add=true) %} {% for d in standard_actions %} {% if d.name is defined %} From f8ae3f12e65aeb6a5efa851b6a55f81adeab94df Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 7 Sep 2023 17:22:10 -0400 Subject: [PATCH 081/417] addl node types --- setup/so-setup | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/setup/so-setup b/setup/so-setup index c1d92ec62..030afdf47 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ 
-194,6 +194,18 @@ if [ -n "$test_profile" ]; then install_type=DESKTOP MSRVIP_OFFSET=-3 is_desktop_grid=true + elif [[ "$test_profile" =~ "-idh" ]]; then + install_type=IDH + HOSTNAME=idh + MSRVIP_OFFSET=-4 + elif [[ "$test_profile" =~ "-receiver" ]]; then + install_type=RECEIVER + HOSTNAME=receiver + MSRVIP_OFFSET=-5 + elif [[ "$test_profile" =~ "-fleet" ]]; then + install_type=FLEET + HOSTNAME=fleet + MSRVIP_OFFSET=-6 else HOSTNAME=manager fi From 598515e5b447770bb9cee1dae8d8e7974ff08112 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 8 Sep 2023 09:21:13 -0400 Subject: [PATCH 082/417] give priority to presets --- setup/so-functions | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 5015b4bff..5d6ada340 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -398,20 +398,22 @@ collect_mngr_hostname() { sed -i "/$MSRV/d" /etc/hosts fi - if ! getent hosts "$MSRV"; then - whiptail_manager_ip + if [[ -z "$MSRVIP" ]]; then + if ! getent hosts "$MSRV"; then + whiptail_manager_ip - while ! valid_ip4 "$MSRVIP" || [[ $MSRVIP == "$MAINIP" || $MSRVIP == "127.0.0.1" ]]; do - whiptail_invalid_input + while ! valid_ip4 "$MSRVIP" || [[ $MSRVIP == "$MAINIP" || $MSRVIP == "127.0.0.1" ]]; do + whiptail_invalid_input + whiptail_manager_ip "$MSRVIP" + done + else + MSRVIP=$(getent hosts "$MSRV" | awk 'NR==1{print $1}') whiptail_manager_ip "$MSRVIP" - done - else - MSRVIP=$(getent hosts "$MSRV" | awk 'NR==1{print $1}') - whiptail_manager_ip "$MSRVIP" - while ! valid_ip4 "$MSRVIP" || [[ $MSRVIP == "$MAINIP" || $MSRVIP == "127.0.0.1" ]]; do - whiptail_invalid_input - whiptail_manager_ip "$MSRVIP" - done + while ! 
valid_ip4 "$MSRVIP" || [[ $MSRVIP == "$MAINIP" || $MSRVIP == "127.0.0.1" ]]; do + whiptail_invalid_input + whiptail_manager_ip "$MSRVIP" + done + fi fi } From e814a3409f4dc2ac56fcdc5c32d79c6231dff1c7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 8 Sep 2023 15:28:24 -0400 Subject: [PATCH 083/417] fix rule location for rulecat.conf. run so-rule-update if rules change in /opt/so/rules/nids --- salt/idstools/enabled.sls | 1 + salt/idstools/etc/rulecat.conf | 4 ++-- salt/idstools/sorules/extraction.rules | 26 -------------------------- salt/idstools/sorules/filters.rules | 11 ----------- 4 files changed, 3 insertions(+), 39 deletions(-) delete mode 100644 salt/idstools/sorules/extraction.rules delete mode 100644 salt/idstools/sorules/filters.rules diff --git a/salt/idstools/enabled.sls b/salt/idstools/enabled.sls index 3f5acda19..31afc5113 100644 --- a/salt/idstools/enabled.sls +++ b/salt/idstools/enabled.sls @@ -77,6 +77,7 @@ run_so-rule-update: - docker_container: so-idstools - onchanges: - file: idstoolsetcsync + - file: synclocalnidsrules - order: last {% else %} diff --git a/salt/idstools/etc/rulecat.conf b/salt/idstools/etc/rulecat.conf index 8be3aa1ce..d6f3d93d8 100644 --- a/salt/idstools/etc/rulecat.conf +++ b/salt/idstools/etc/rulecat.conf @@ -3,8 +3,8 @@ --merged=/opt/so/rules/nids/all.rules --local=/opt/so/rules/nids/local.rules {%- if GLOBALS.md_engine == "SURICATA" %} ---local=/opt/so/rules/nids/sorules/extraction.rules ---local=/opt/so/rules/nids/sorules/filters.rules +--local=/opt/so/rules/nids/extraction.rules +--local=/opt/so/rules/nids/filters.rules {%- endif %} --url=http://{{ GLOBALS.manager }}:7788/suricata/emerging-all.rules --disable=/opt/so/idstools/etc/disable.conf diff --git a/salt/idstools/sorules/extraction.rules b/salt/idstools/sorules/extraction.rules deleted file mode 100644 index bccfc69d6..000000000 --- a/salt/idstools/sorules/extraction.rules +++ /dev/null @@ -1,26 +0,0 @@ -# Extract all PDF mime type -alert http any any -> any 
any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100000; rev:1;) -alert smtp any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100001; rev:1;) -alert nfs any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100002; rev:1;) -alert smb any any -> any any (msg:"FILE pdf detected"; filemagic:"PDF document"; filestore; sid:1100003; rev:1;) -# Extract EXE/DLL file types -alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100004; rev:1;) -alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100005; rev:1;) -alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100006; rev:1;) -alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"PE32 executable"; filestore; sid:1100007; rev:1;) -alert http any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100008; rev:1;) -alert smtp any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100009; rev:1;) -alert nfs any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100010; rev:1;) -alert smb any any -> any any (msg:"FILE EXE detected"; filemagic:"MS-DOS executable"; filestore; sid:1100011; rev:1;) - -# Extract all Zip files -alert http any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100012; rev:1;) -alert smtp any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100013; rev:1;) -alert nfs any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100014; rev:1;) -alert smb any any -> any any (msg:"FILE ZIP detected"; filemagic:"Zip"; filestore; sid:1100015; rev:1;) - -# Extract Word Docs -alert http any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; 
filestore; sid:1100016; rev:1;) -alert smtp any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100017; rev:1;) -alert nfs any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100018; rev:1;) -alert smb any any -> any any (msg:"FILE WORDDOC detected"; filemagic:"Composite Document File V2 Document"; filestore; sid:1100019; rev:1;) \ No newline at end of file diff --git a/salt/idstools/sorules/filters.rules b/salt/idstools/sorules/filters.rules deleted file mode 100644 index 051d1913f..000000000 --- a/salt/idstools/sorules/filters.rules +++ /dev/null @@ -1,11 +0,0 @@ -# Start the filters at sid 1200000 -# Example of filtering out *google.com from being in the dns log. -#config dns any any -> any any (dns.query; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200000;) -# Example of filtering out *google.com from being in the http log. -#config http any any -> any any (http.host; content:"google.com"; config: logging disable, type tx, scope tx; sid:1200001;) -# Example of filtering out someuseragent from being in the http log. -#config http any any -> any any (http.user_agent; content:"someuseragent"; config: logging disable, type tx, scope tx; sid:1200002;) -# Example of filtering out Google's certificate from being in the ssl log. -#config tls any any -> any any (tls.fingerprint; content:"4f:a4:5e:58:7e:d9:db:20:09:d7:b6:c7:ff:58:c4:7b:dc:3f:55:b4"; config: logging disable, type tx, scope tx; sid:1200003;) -# Example of filtering out a md5 of a file from being in the files log. 
-#config fileinfo any any -> any any (fileinfo.filemd5; content:"7a125dc69c82d5caf94d3913eecde4b5"; config: logging disable, type tx, scope tx; sid:1200004;) From f1d0db81714941b6652337638291cac55124e8e8 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 11 Sep 2023 13:30:11 +0000 Subject: [PATCH 084/417] /app to /kibana/app --- salt/nginx/etc/nginx.conf | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 05da0b5d8..b2616e946 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -230,7 +230,19 @@ http { proxy_cookie_path /api/ /influxdb/api/; } - location /kibana/ { + location /app/ { + auth_request /auth/sessions/whoami; + rewrite /app/(.*) /app/$1 break; + proxy_pass http://{{ GLOBALS.manager }}:5601/app/; + proxy_read_timeout 300; + proxy_connect_timeout 300; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + + location /kibana/ { auth_request /auth/sessions/whoami; rewrite /kibana/(.*) /$1 break; proxy_pass http://{{ GLOBALS.manager }}:5601/; From 35ebbc974c370ac0ac4a4f0d8c8403d677656c85 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 11 Sep 2023 13:52:16 +0000 Subject: [PATCH 085/417] Change description to indicate that opencanary modules only apply to IDH nodes --- salt/idh/soc_idh.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/idh/soc_idh.yaml b/salt/idh/soc_idh.yaml index f792812e4..1d6918405 100644 --- a/salt/idh/soc_idh.yaml +++ b/salt/idh/soc_idh.yaml @@ -23,7 +23,7 @@ idh: class: *loggingOptions filename: *loggingOptions portscan_x_enabled: &serviceOptions - description: To enable this opencanary module, set this value to true. To disable set to false. + description: To enable this opencanary module, set this value to true. To disable set to false. 
This option only applies to IDH nodes within your grid. helpLink: idh.html portscan_x_logfile: *loggingOptions portscan_x_synrate: From 30c3255cb28f5d62dc9fcae4186a7a2b4554faf7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 12 Sep 2023 08:39:42 -0400 Subject: [PATCH 086/417] dont manage sorules --- salt/idstools/sync_files.sls | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/salt/idstools/sync_files.sls b/salt/idstools/sync_files.sls index e8d5edda6..64479e937 100644 --- a/salt/idstools/sync_files.sls +++ b/salt/idstools/sync_files.sls @@ -26,13 +26,6 @@ rulesdir: - group: 939 - makedirs: True -SOrulesdir: - file.directory: - - name: /opt/so/rules/nids/sorules - - user: 939 - - group: 939 - - makedirs: True - # Don't show changes because all.rules can be large synclocalnidsrules: file.recurse: @@ -42,13 +35,3 @@ synclocalnidsrules: - group: 939 - show_changes: False - include_pat: 'E@.rules' - -# Don't show changes because all.rules can be large -syncnidsSOrules: - file.recurse: - - name: /opt/so/rules/nids/sorules - - source: salt://idstools/sorules/ - - user: 939 - - group: 939 - - show_changes: False - - include_pat: 'E@.rules' From 11b8e1341885118bd22ccee21d5c6e1776f474f8 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 13 Sep 2023 07:37:54 -0400 Subject: [PATCH 087/417] FIX: SOC Config pcap doc links should point to steno docs #11302 --- salt/pcap/soc_pcap.yaml | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/salt/pcap/soc_pcap.yaml b/salt/pcap/soc_pcap.yaml index 0f4b7e1e4..32204a23a 100644 --- a/salt/pcap/soc_pcap.yaml +++ b/salt/pcap/soc_pcap.yaml @@ -1,35 +1,35 @@ pcap: enabled: description: You can enable or disable Stenographer on all sensors or a single sensor. - helpLink: pcap.html + helpLink: stenographer.html config: maxdirectoryfiles: description: The maximum number of packet/index files to create before deleting old files. 
- helpLink: pcap.html + helpLink: stenographer.html diskfreepercentage: description: The disk space percent to always keep free for PCAP - helpLink: pcap.html + helpLink: stenographer.html blocks: description: The number of 1MB packet blocks used by AF_PACKET to store packets in memory, per thread. You shouldn't need to change this. advanced: True - helpLink: pcap.html + helpLink: stenographer.html preallocate_file_mb: description: File size to pre-allocate for individual PCAP files. You shouldn't need to change this. advanced: True - helpLink: pcap.html + helpLink: stenographer.html aiops: description: The max number of async writes to allow at once. advanced: True - helpLink: pcap.html + helpLink: stenographer.html pin_to_cpu: description: Enable CPU pinning for PCAP. advanced: True - helpLink: pcap.html + helpLink: stenographer.html cpus_to_pin_to: description: CPU to pin PCAP to. Currently only a single CPU is supported. advanced: True - helpLink: pcap.html + helpLink: stenographer.html disks: description: List of disks to use for PCAP. This is currently not used. 
advanced: True - helpLink: pcap.html + helpLink: stenographer.html From 22c0323bdae337f5cb4431ff6a1f54a0ddcc9f81 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 13 Sep 2023 10:57:45 -0400 Subject: [PATCH 088/417] Update so-minion --- salt/manager/tools/sbin/so-minion | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 075632985..01a58585f 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -239,6 +239,10 @@ function add_sensor_to_minion() { echo " threads: '$CORECOUNT'" >> $PILLARFILE echo "pcap:" >> $PILLARFILE echo " enabled: True" >> $PILLARFILE + if [[ $is_pcaplimit ]]; then + echo " config:" >> $PILLARFILE + echo " diskfreepercentage: 40" >> $PILLARFILE + fi echo " " >> $PILLARFILE } @@ -409,6 +413,7 @@ function apply_ES_state() { salt-call state.apply elasticsearch concurrent=True } function createEVAL() { + is_pcaplimit=true add_elasticsearch_to_minion add_sensor_to_minion add_strelka_to_minion @@ -429,6 +434,7 @@ function createEVAL() { } function createSTANDALONE() { + is_pcaplimit=true add_elasticsearch_to_minion add_logstash_to_minion add_sensor_to_minion @@ -520,8 +526,9 @@ function createIDH() { } function createHEAVYNODE() { + is_pcaplimit=true add_elasticsearch_to_minion - add_elastic_agent_to_minion + add_elastic_agent_to_minion add_logstash_to_minion add_sensor_to_minion add_strelka_to_minion From 33d68478b6678e6707061b4f8e2755f2b705b0a6 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 13 Sep 2023 11:48:16 -0400 Subject: [PATCH 089/417] Update so-minion --- salt/manager/tools/sbin/so-minion | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 01a58585f..64084dbd0 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -241,7 +241,7 @@ function add_sensor_to_minion() { echo " enabled: 
True" >> $PILLARFILE if [[ $is_pcaplimit ]]; then echo " config:" >> $PILLARFILE - echo " diskfreepercentage: 40" >> $PILLARFILE + echo " diskfreepercentage: 60" >> $PILLARFILE fi echo " " >> $PILLARFILE } From e067b7134e3ed344af40eabfe6a281567705e6e7 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 14 Sep 2023 07:38:07 -0400 Subject: [PATCH 090/417] exclude docker pull unauth errors from failing setup since they'll be retried --- setup/so-verify | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-verify b/setup/so-verify index 07d24d114..e907e8bdc 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -52,6 +52,7 @@ log_has_errors() { grep -vE "/nsm/rules/yara*" | \ grep -vE "Failed to restart snapd" | \ grep -vE "Login Failed Details" | \ + grep -vE "response from daemon: unauthorized" | \ grep -vE "Running scope as unit" &> "$error_log" if [[ $? -eq 0 ]]; then From 59d077f3ffb94df96c183b817786b738ff5d0432 Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Thu, 14 Sep 2023 08:32:17 -0400 Subject: [PATCH 091/417] Fix regex --- .../tools/sbin_jinja/so-elastic-agent-gen-installers | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers index c935521fd..275bc6a11 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers @@ -46,7 +46,7 @@ do done printf "\n### Stripping out unused components" -find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -regex '.*fleet.*\|.*packet.*\|.*apm*.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete +find /nsm/elastic-agent-workspace/elastic-agent-*/data/elastic-agent-*/components -maxdepth 1 -regex '.*fleet.*\|.*packet.*\|.*apm.*\|.*audit.*\|.*heart.*\|.*cloud.*' -delete printf "\n### Tarring everything up again" for OS in "${OSARCH[@]}" From 
0c11a9b7337c523cf24396cdd264cdcf69c6d979 Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Thu, 14 Sep 2023 09:33:17 -0400 Subject: [PATCH 092/417] Add transform role --- salt/elasticsearch/config.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/config.map.jinja b/salt/elasticsearch/config.map.jinja index ed4a5033f..37447cabb 100644 --- a/salt/elasticsearch/config.map.jinja +++ b/salt/elasticsearch/config.map.jinja @@ -21,7 +21,7 @@ {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %} {% endfor %} {% if grains.id.split('_') | last == 'manager' %} - {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master','data','remote_cluster_client']}) %} + {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master','data','remote_cluster_client','transform']}) %} {% else %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data_hot', 'remote_cluster_client']}) %} {% endif %} From c65c9777bdb8590faf056039f7602e2254249137 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 14 Sep 2023 17:42:25 -0400 Subject: [PATCH 093/417] improvements for checking system requirements --- setup/so-functions | 58 ++++++++++++++++++++++++++++++++++------------ setup/so-setup | 22 +++++++++--------- setup/so-variables | 8 +++---- setup/so-whiptail | 17 +------------- 4 files changed, 59 insertions(+), 46 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 5d6ada340..3707e3141 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -707,8 +707,6 @@ checkin_at_boot() { } check_requirements() { - local standalone_or_dist=$1 - local node_type=$2 # optional local req_mem local req_cores local req_storage @@ -716,27 +714,57 @@ readarray -t nic_list <<< "$(ip link| awk -F: '$0 !~ "lo|vir|veth|br|docker|wl|^[^0-9]"{print $2}' | grep -vwe "bond0" | sed 's/ //g' | sed -r 
's/(.*)(\.[0-9]+)@\1/\1\2/g')" local num_nics=${#nic_list[@]} - if [[ "$standalone_or_dist" == 'standalone' ]]; then + if [[ $is_eval ]]; then req_mem=12 req_cores=4 req_nics=2 - elif [[ "$standalone_or_dist" == 'dist' ]]; then - req_mem=8 + elif [[ $is_standalone ]]; then + req_mem=24 req_cores=4 - if [[ "$node_type" == 'sensor' ]]; then req_nics=2; else req_nics=1; fi - if [[ "$node_type" == 'fleet' ]]; then req_mem=4; fi - if [[ "$node_type" == 'idh' ]]; then req_mem=1 req_cores=2; fi - elif [[ "$standalone_or_dist" == 'import' ]]; then + req_nics=2 + elif [[ $is_manager ]]; then + req_mem=16 + req_cores=4 + req_nics=1 + elif [[ $is_managersearch ]]; then + req_mem=16 + req_cores=8 + req_nics=1 + elif [[ $is_sensor ]]; then + req_mem=12 + req_cores=4 + req_nics=2 + elif [[ $is_fleet ]]; then req_mem=4 + req_cores=4 + req_nics=1 + elif [[ $is_searchnode ]]; then + req_mem=16 + req_cores=4 + req_nics=1 + elif [[ $is_heavynode ]]; then + req_mem=24 + req_cores=4 + req_nics=2 + elif [[ $is_idh ]]; then + req_mem=1 + req_cores=2 + req_nics=1 + elif [[ $is_import ]]; then + req_mem=4 + req_cores=2 + req_nics=1 + elif [[ $is_receiver ]]; then + req_mem=8 req_cores=2 req_nics=1 fi if [[ $setup_type == 'network' ]] ; then - if [[ -n $nsm_mount ]]; then - if [[ "$standalone_or_dist" == 'import' ]]; then + if [[ -n $nsm_mount ]]; then # does a /nsm mount exist + if [[ $is_import ]]; then req_storage=50 - elif [[ "$node_type" == 'idh' ]]; then + elif [[ $is_idh ]]; then req_storage=12 else req_storage=100 @@ -748,10 +776,10 @@ check_requirements() { whiptail_storage_requirements "/nsm" "${free_space_nsm} GB" "${req_storage} GB" fi else - if [[ "$standalone_or_dist" == 'import' ]]; then + if [[ $is_import ]]; then req_storage=50 - elif [[ "$node_type" == 'idh' ]]; then - req_storage=12 + elif [[ $is_idh ]]; then + req_storage=12 else req_storage=200 fi diff --git a/setup/so-setup b/setup/so-setup index 030afdf47..e35dde579 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ 
-422,7 +422,7 @@ if ! [[ -f $install_opt_file ]]; then # If it is an install from ISO is this airgap? [[ $is_iso ]] && whiptail_airgap # Make sure minimum requirements are met - check_requirements "manager" + check_requirements # Do networking things networking_needful # Do we need a proxy? @@ -453,7 +453,7 @@ if ! [[ -f $install_opt_file ]]; then monints=true check_elastic_license [[ $is_iso ]] && whiptail_airgap - check_requirements "manager" + check_requirements networking_needful [[ ! $is_airgap ]] && collect_net_method collect_dockernet @@ -474,7 +474,7 @@ if ! [[ -f $install_opt_file ]]; then check_elastic_license waitforstate=true [[ $is_iso ]] && whiptail_airgap - check_requirements "manager" + check_requirements networking_needful [[ ! $is_airgap ]] && collect_net_method collect_dockernet @@ -494,7 +494,7 @@ if ! [[ -f $install_opt_file ]]; then check_elastic_license waitforstate=true [[ $is_iso ]] && whiptail_airgap - check_requirements "manager" + check_requirements networking_needful [[ ! $is_airgap ]] && collect_net_method collect_dockernet @@ -512,7 +512,7 @@ if ! [[ -f $install_opt_file ]]; then elif [[ $is_sensor ]]; then info "Setting up as node type sensor" monints=true - check_requirements "sensor" + check_requirements calculate_useable_cores networking_needful check_network_manager_conf @@ -527,7 +527,7 @@ if ! [[ -f $install_opt_file ]]; then elif [[ $is_fleet ]]; then info "Setting up as node type fleet" - check_requirements "fleet" + check_requirements networking_needful check_network_manager_conf set_network_dev_status_list @@ -540,7 +540,7 @@ if ! [[ -f $install_opt_file ]]; then elif [[ $is_searchnode ]]; then info "Setting up as node type searchnode" - check_requirements "elasticsearch" + check_requirements networking_needful check_network_manager_conf set_network_dev_status_list @@ -554,7 +554,7 @@ if ! 
[[ -f $install_opt_file ]]; then elif [[ $is_heavynode ]]; then info "Setting up as node type heavynode" monints=true - check_requirements "heavynode" + check_requirements calculate_useable_cores networking_needful check_network_manager_conf @@ -569,7 +569,7 @@ if ! [[ -f $install_opt_file ]]; then elif [[ $is_idh ]]; then info "Setting up as node type idh" - check_requirements "idh" + check_requirements networking_needful collect_mngr_hostname add_mngr_ip_to_hosts @@ -583,7 +583,7 @@ if ! [[ -f $install_opt_file ]]; then waitforstate=true [[ $is_iso ]] && whiptail_airgap check_elastic_license - check_requirements "import" + check_requirements networking_needful [[ ! $is_airgap ]] && detect_cloud collect_dockernet @@ -601,7 +601,7 @@ if ! [[ -f $install_opt_file ]]; then elif [[ $is_receiver ]]; then info "Setting up as node type receiver" - check_requirements "receiver" + check_requirements networking_needful collect_mngr_hostname add_mngr_ip_to_hosts diff --git a/setup/so-variables b/setup/so-variables index 7c5e51c6c..7f6522487 100644 --- a/setup/so-variables +++ b/setup/so-variables @@ -5,7 +5,7 @@ mkdir -p /nsm total_mem=$(grep MemTotal /proc/meminfo | awk '{print $2}' | sed -r 's/.{3}$//') export total_mem -total_mem_hr=$(grep MemTotal /proc/meminfo | awk '{ printf("%.0f", $2/1024/1024); }') +total_mem_hr=$(grep MemTotal /proc/meminfo | awk '{ printf("%.0f", $2/1000/1000); }') export total_mem_hr num_cpu_cores=$(nproc) @@ -32,10 +32,10 @@ export filesystem_root filesystem_nsm=$(df /nsm | awk '$3 ~ /[0-9]+/ { print $2 * 1000 }') export filesystem_nsm -free_space_nsm=$(df -Pk /nsm | sed 1d | grep -v used | awk '{ print $4 / 1048576 }' | awk '{ printf("%.0f", $1) }') +free_space_nsm=$(df -Pk /nsm | sed 1d | grep -v used | awk '{ print $4 / 1042803 }' | awk '{ printf("%.0f", $1) }') export free_space_nsm -free_space_root=$(df -Pk / | sed 1d | grep -v used | awk '{ print $4 / 1048576 }' | awk '{ printf("%.0f", $1) }') +free_space_root=$(df -Pk / | sed 1d | grep -v 
used | awk '{ print $4 / 1042803 }' | awk '{ printf("%.0f", $1) }') export free_space_root readarray -t mountpoints <<< "$(lsblk -nlo MOUNTPOINT)" @@ -218,4 +218,4 @@ patch_pillar_file="$local_salt_dir/pillar/patch/soc_patch.sls" export patch_pillar_file adv_patch_pillar_file="$local_salt_dir/pillar/patch/adv_patch.sls" -export adv_patch_pillar_file \ No newline at end of file +export adv_patch_pillar_file diff --git a/setup/so-whiptail b/setup/so-whiptail index 9622ad44a..ede138d26 100755 --- a/setup/so-whiptail +++ b/setup/so-whiptail @@ -232,7 +232,7 @@ whiptail_requirements_error() { [ -n "$TESTING" ] && return - if [[ $(echo "$requirement_needed" | tr '[:upper:]' '[:lower:]') == 'nics' ]]; then + if [[ $(echo "$requirement_needed" | tr '[:upper:]' '[:lower:]') =~ 'nic' ]]; then whiptail --title "$whiptail_title" \ --msgbox "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Select OK to exit setup and reconfigure the machine." 10 75 @@ -1184,21 +1184,6 @@ whiptail_reinstall() { whiptail_check_exitstatus $exitstatus } -whiptail_requirements_error() { - - local requirement_needed=$1 - local current_val=$2 - local needed_val=$3 - - [ -n "$TESTING" ] && return - - whiptail --title "$whiptail_title" \ - --yesno "This machine currently has $current_val $requirement_needed, but needs $needed_val to meet minimum requirements. Select YES to continue anyway, or select NO to cancel." 10 75 - - local exitstatus=$? 
- whiptail_check_exitstatus $exitstatus -} - whiptail_sensor_config() { [ -n "$TESTING" ] && return From 98499c3963e564af5694d11f736ca93877e930c6 Mon Sep 17 00:00:00 2001 From: Wes Date: Fri, 15 Sep 2023 13:51:46 +0000 Subject: [PATCH 094/417] Clean component template directory --- salt/elasticsearch/enabled.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/enabled.sls b/salt/elasticsearch/enabled.sls index 8baff4901..fa0f824b4 100644 --- a/salt/elasticsearch/enabled.sls +++ b/salt/elasticsearch/enabled.sls @@ -108,6 +108,7 @@ escomponenttemplates: - source: salt://elasticsearch/templates/component - user: 930 - group: 939 + - clean: True - onchanges_in: - cmd: so-elasticsearch-templates From f9cbde10a6787d5eefa11696d85cdc35b1d5e3d3 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 18 Sep 2023 11:19:21 -0400 Subject: [PATCH 095/417] avoid volume sprawl --- salt/influxdb/config.sls | 8 ++++++++ salt/influxdb/enabled.sls | 1 + salt/playbook/config.sls | 8 ++++++++ salt/playbook/enabled.sls | 1 + salt/redis/config.sls | 7 +++++++ salt/redis/enabled.sls | 1 + 6 files changed, 26 insertions(+) diff --git a/salt/influxdb/config.sls b/salt/influxdb/config.sls index 54e20b713..3520e46b3 100644 --- a/salt/influxdb/config.sls +++ b/salt/influxdb/config.sls @@ -25,6 +25,14 @@ influxlogdir: - group: 939 - makedirs: True +influxlogdir: + file.directory: + - name: /opt/so/conf/influxdb/etc + - dir_mode: 750 + - user: 939 + - group: 939 + - makedirs: True + influxdbdir: file.directory: - name: /nsm/influxdb diff --git a/salt/influxdb/enabled.sls b/salt/influxdb/enabled.sls index 70f4c404f..c0733c12c 100644 --- a/salt/influxdb/enabled.sls +++ b/salt/influxdb/enabled.sls @@ -38,6 +38,7 @@ so-influxdb: - binds: - /opt/so/log/influxdb/:/log:rw - /opt/so/conf/influxdb/config.yaml:/conf/config.yaml:ro + - /opt/so/conf/influxdb/etc:/etc/influxdb2:rw - /nsm/influxdb:/var/lib/influxdb2:rw - /etc/pki/influxdb.crt:/conf/influxdb.crt:ro - 
/etc/pki/influxdb.key:/conf/influxdb.key:ro diff --git a/salt/playbook/config.sls b/salt/playbook/config.sls index 7d37f8873..f4c2cf137 100644 --- a/salt/playbook/config.sls +++ b/salt/playbook/config.sls @@ -91,6 +91,14 @@ playbooklogdir: - group: 939 - makedirs: True +playbookfilesdir: + file.directory: + - name: /opt/so/conf/playbook/redmine-files + - dir_mode: 775 + - user: 939 + - group: 939 + - makedirs: True + {% if 'idh' in salt['cmd.shell']("ls /opt/so/saltstack/local/pillar/minions/|awk -F'_' {'print $2'}|awk -F'.' {'print $1'}").split() %} idh-plays: file.recurse: diff --git a/salt/playbook/enabled.sls b/salt/playbook/enabled.sls index 434cb18e4..e70fec693 100644 --- a/salt/playbook/enabled.sls +++ b/salt/playbook/enabled.sls @@ -33,6 +33,7 @@ so-playbook: - sobridge: - ipv4_address: {{ DOCKER.containers['so-playbook'].ip }} - binds: + - /opt/so/conf/playbook/redmine-files:/usr/src/redmine/files:rw - /opt/so/log/playbook:/playbook/log:rw {% if DOCKER.containers['so-playbook'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-playbook'].custom_bind_mounts %} diff --git a/salt/redis/config.sls b/salt/redis/config.sls index d698040f8..053d46707 100644 --- a/salt/redis/config.sls +++ b/salt/redis/config.sls @@ -25,6 +25,13 @@ redisworkdir: - group: 939 - makedirs: True +redisdatadir: + file.directory: + - name: /nsm/redis/data + - user: 939 + - group: 939 + - makedirs: True + redislogdir: file.directory: - name: /opt/so/log/redis diff --git a/salt/redis/enabled.sls b/salt/redis/enabled.sls index 27177d217..fc206e3cb 100644 --- a/salt/redis/enabled.sls +++ b/salt/redis/enabled.sls @@ -28,6 +28,7 @@ so-redis: - /opt/so/log/redis:/var/log/redis:rw - /opt/so/conf/redis/etc/redis.conf:/usr/local/etc/redis/redis.conf:ro - /opt/so/conf/redis/working:/redis:rw + - /nsm/redis/data:/data:rw - /etc/pki/redis.crt:/certs/redis.crt:ro - /etc/pki/redis.key:/certs/redis.key:ro {% if grains['role'] in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] 
%} From bbef96ac25fafe91f29b432133a4ba0773b7a367 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 18 Sep 2023 12:12:57 -0400 Subject: [PATCH 096/417] use unique name --- salt/influxdb/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/influxdb/config.sls b/salt/influxdb/config.sls index 3520e46b3..66c681a0d 100644 --- a/salt/influxdb/config.sls +++ b/salt/influxdb/config.sls @@ -25,7 +25,7 @@ influxlogdir: - group: 939 - makedirs: True -influxlogdir: +influxetcdir: file.directory: - name: /opt/so/conf/influxdb/etc - dir_mode: 750 From 66bb1272aef598de001d9f134847ceecdd36a4fe Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 18 Sep 2023 13:39:56 -0400 Subject: [PATCH 097/417] avoid volume sprawl --- salt/strelka/config.sls | 14 ++++++++++++++ salt/strelka/coordinator/enabled.sls | 1 + salt/strelka/gatekeeper/enabled.sls | 1 + 3 files changed, 16 insertions(+) diff --git a/salt/strelka/config.sls b/salt/strelka/config.sls index bf3ac3dca..1d0f75adf 100644 --- a/salt/strelka/config.sls +++ b/salt/strelka/config.sls @@ -43,6 +43,20 @@ strelka_sbin: - group: 939 - file_mode: 755 +strelkagkredisdatadir: + file.directory: + - name: /nsm/strelka/gk-redis-data + - user: 939 + - group: 939 + - makedirs: True + +strelkacoordredisdatadir: + file.directory: + - name: /nsm/strelka/coord-redis-data + - user: 939 + - group: 939 + - makedirs: True + {% else %} {{sls}}_state_not_allowed: diff --git a/salt/strelka/coordinator/enabled.sls b/salt/strelka/coordinator/enabled.sls index 7a156bc9a..1222378f7 100644 --- a/salt/strelka/coordinator/enabled.sls +++ b/salt/strelka/coordinator/enabled.sls @@ -39,6 +39,7 @@ strelka_coordinator: {% endif %} {% if DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %} - binds: + - /nsm/strelka/coord-redis-data:/data:rw {% for BIND in DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %} - {{ BIND }} {% endfor %} diff --git a/salt/strelka/gatekeeper/enabled.sls 
b/salt/strelka/gatekeeper/enabled.sls index b309403f4..185910f83 100644 --- a/salt/strelka/gatekeeper/enabled.sls +++ b/salt/strelka/gatekeeper/enabled.sls @@ -33,6 +33,7 @@ strelka_gatekeeper: {% endfor %} {% if DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %} - binds: + - /nsm/strelka/gk-redis-data:/data:rw {% for BIND in DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %} - {{ BIND }} {% endfor %} From bb3632d1b262a8cdaaead837be15edd0f33019a9 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 18 Sep 2023 14:38:15 -0400 Subject: [PATCH 098/417] fix bind if statement --- salt/strelka/coordinator/enabled.sls | 4 ++-- salt/strelka/gatekeeper/enabled.sls | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/salt/strelka/coordinator/enabled.sls b/salt/strelka/coordinator/enabled.sls index 1222378f7..3440cd5a4 100644 --- a/salt/strelka/coordinator/enabled.sls +++ b/salt/strelka/coordinator/enabled.sls @@ -37,13 +37,13 @@ strelka_coordinator: - {{ XTRAENV }} {% endfor %} {% endif %} - {% if DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %} - binds: - /nsm/strelka/coord-redis-data:/data:rw + {% if DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %} {% for BIND in DOCKER.containers['so-strelka-coordinator'].custom_bind_mounts %} - {{ BIND }} {% endfor %} - {% endif %} + {% endif %} delete_so-strelka-coordinator_so-status.disabled: file.uncomment: - name: /opt/so/conf/so-status/so-status.conf diff --git a/salt/strelka/gatekeeper/enabled.sls b/salt/strelka/gatekeeper/enabled.sls index 185910f83..8d06ddf6a 100644 --- a/salt/strelka/gatekeeper/enabled.sls +++ b/salt/strelka/gatekeeper/enabled.sls @@ -31,13 +31,13 @@ strelka_gatekeeper: {% for BINDING in DOCKER.containers['so-strelka-gatekeeper'].port_bindings %} - {{ BINDING }} {% endfor %} - {% if DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %} - binds: - /nsm/strelka/gk-redis-data:/data:rw - {% for BIND in 
DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %} + {% if DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %} + {% for BIND in DOCKER.containers['so-strelka-gatekeeper'].custom_bind_mounts %} - {{ BIND }} - {% endfor %} - {% endif %} + {% endfor %} + {% endif %} {% if DOCKER.containers['so-strelka-gatekeeper'].extra_env %} - environment: {% for XTRAENV in DOCKER.containers['so-strelka-gatekeeper'].extra_env %} From a914a022732a3691ab52dff7bf10f37d4121cffc Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 18 Sep 2023 14:43:02 -0400 Subject: [PATCH 099/417] prune unused volumes during upgrade --- salt/manager/tools/sbin/soup | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 45e3df530..1251f9a57 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -434,7 +434,8 @@ post_to_2.4.10() { } post_to_2.4.20() { - echo "Nothing to apply" + echo "Pruning unused volumes" + docker volume prune -f POSTVERSION=2.4.20 } From 151e8bfc4e4c19e8daaffbd902cfd0169ced9721 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 18 Sep 2023 15:21:45 -0400 Subject: [PATCH 100/417] fix idstool extra_env for container --- salt/idstools/enabled.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/idstools/enabled.sls b/salt/idstools/enabled.sls index 31afc5113..decc5a5b2 100644 --- a/salt/idstools/enabled.sls +++ b/salt/idstools/enabled.sls @@ -26,8 +26,8 @@ so-idstools: - http_proxy={{ proxy }} - https_proxy={{ proxy }} - no_proxy={{ salt['pillar.get']('manager:no_proxy') }} - {% if DOCKER.containers['so-elastalert'].extra_env %} - {% for XTRAENV in DOCKER.containers['so-elastalert'].extra_env %} + {% if DOCKER.containers['so-idstools'].extra_env %} + {% for XTRAENV in DOCKER.containers['so-idstools'].extra_env %} - {{ XTRAENV }} {% endfor %} {% endif %} From 5bac1e4d15f65ba01e3337acf7be0921e6a6fa99 Mon Sep 17 00:00:00 2001 From: 
Wes Date: Mon, 18 Sep 2023 21:31:15 +0000 Subject: [PATCH 101/417] Show correct dates and Kibana URL for already processed EVTX files --- salt/common/tools/sbin_jinja/so-import-evtx | 51 +++++++++++---------- 1 file changed, 26 insertions(+), 25 deletions(-) diff --git a/salt/common/tools/sbin_jinja/so-import-evtx b/salt/common/tools/sbin_jinja/so-import-evtx index 59a13612c..f48f935bc 100755 --- a/salt/common/tools/sbin_jinja/so-import-evtx +++ b/salt/common/tools/sbin_jinja/so-import-evtx @@ -80,8 +80,8 @@ function evtx2es() { -e "SHIFTTS=$SHIFTDATE" \ -v "$EVTX:/tmp/data.evtx" \ -v "/nsm/import/$HASH/evtx/:/tmp/evtx/" \ - -v "/nsm/import/evtx-end_newest:/tmp/newest" \ - -v "/nsm/import/evtx-start_oldest:/tmp/oldest" \ + -v "/nsm/import/$HASH/evtx-end_newest:/tmp/newest" \ + -v "/nsm/import/$HASH/evtx-start_oldest:/tmp/oldest" \ --entrypoint "/evtx_calc_timestamps.sh" \ {{ MANAGER }}:5000/{{ IMAGEREPO }}/so-pcaptools:{{ VERSION }} >> $LOG_FILE 2>&1 } @@ -111,12 +111,6 @@ INVALID_EVTXS_COUNT=0 VALID_EVTXS_COUNT=0 SKIPPED_EVTXS_COUNT=0 -touch /nsm/import/evtx-start_oldest -touch /nsm/import/evtx-end_newest - -echo $START_OLDEST > /nsm/import/evtx-start_oldest -echo $END_NEWEST > /nsm/import/evtx-end_newest - # paths must be quoted in case they include spaces for EVTX in $INPUT_FILES; do EVTX=$(/usr/bin/realpath "$EVTX") @@ -141,8 +135,15 @@ for EVTX in $INPUT_FILES; do status "- this EVTX has already been imported; skipping" SKIPPED_EVTXS_COUNT=$((SKIPPED_EVTXS_COUNT + 1)) else + # create EVTX directory EVTX_DIR=$HASH_DIR/evtx mkdir -p $EVTX_DIR + # create import timestamp files + for i in evtx-start_oldest evtx-end_newest; do + if ! [ -f "$i" ]; then + touch /nsm/import/$HASH/$i + fi + done # import evtx and write them to import ingest pipeline status "- importing logs to Elasticsearch..." 
@@ -154,28 +155,28 @@ for EVTX in $INPUT_FILES; do VALID_EVTXS_COUNT=$((VALID_EVTXS_COUNT + 1)) fi - # compare $START to $START_OLDEST - START=$(cat /nsm/import/evtx-start_oldest) - START_COMPARE=$(date -d $START +%s) - START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s) - if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then - START_OLDEST=$START - fi - - # compare $ENDNEXT to $END_NEWEST - END=$(cat /nsm/import/evtx-end_newest) - ENDNEXT=`date +%Y-%m-%d --date="$END 1 day"` - ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s) - END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s) - if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then - END_NEWEST=$ENDNEXT - fi - cp -f "${EVTX}" "${EVTX_DIR}"/data.evtx chmod 644 "${EVTX_DIR}"/data.evtx fi # end of valid evtx + # compare $START to $START_OLDEST + START=$(cat /nsm/import/$HASH/evtx-start_oldest) + START_COMPARE=$(date -d $START +%s) + START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s) + if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then + START_OLDEST=$START + fi + + # compare $ENDNEXT to $END_NEWEST + END=$(cat /nsm/import/$HASH/evtx-end_newest) + ENDNEXT=`date +%Y-%m-%d --date="$END 1 day"` + ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s) + END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s) + if [ $ENDNEXT_COMPARE -gt $END_NEWEST_COMPARE ]; then + END_NEWEST=$ENDNEXT + fi + status done # end of for-loop processing evtx files From 47e611682a5849c08ad93e2170567c658fd64b7c Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 19 Sep 2023 09:24:12 -0400 Subject: [PATCH 102/417] ignore debian apt update output --- setup/so-verify | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-verify b/setup/so-verify index e907e8bdc..e9a8a375c 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -53,6 +53,7 @@ log_has_errors() { grep -vE "Failed to restart snapd" | \ grep -vE "Login Failed Details" | \ grep -vE "response from daemon: unauthorized" | \ + grep -vE "Reading first line of patchfile" | \ grep -vE "Running scope as unit" &> "$error_log" if 
[[ $? -eq 0 ]]; then From a1e963f834918a909245c74551f98bb37933b7a8 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 19 Sep 2023 13:28:20 +0000 Subject: [PATCH 103/417] Reverse timestamps where necessary --- salt/common/tools/sbin_jinja/so-import-evtx | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin_jinja/so-import-evtx b/salt/common/tools/sbin_jinja/so-import-evtx index f48f935bc..d12f34593 100755 --- a/salt/common/tools/sbin_jinja/so-import-evtx +++ b/salt/common/tools/sbin_jinja/so-import-evtx @@ -160,8 +160,18 @@ for EVTX in $INPUT_FILES; do fi # end of valid evtx - # compare $START to $START_OLDEST + # determine start and end and make sure they aren't reversed START=$(cat /nsm/import/$HASH/evtx-start_oldest) + END=$(cat /nsm/import/$HASH/evtx-end_newest) + START_EPOCH=`date -d "$START" +"%s"` + END_EPOCH=`date -d "$END" +"%s"` + if [ "$START_EPOCH" -gt "$END_EPOCH" ]; then + TEMP=$START + START=$END + END=$TEMP + fi + + # compare $START to $START_OLDEST START_COMPARE=$(date -d $START +%s) START_OLDEST_COMPARE=$(date -d $START_OLDEST +%s) if [ $START_COMPARE -lt $START_OLDEST_COMPARE ]; then @@ -169,7 +179,6 @@ for EVTX in $INPUT_FILES; do fi # compare $ENDNEXT to $END_NEWEST - END=$(cat /nsm/import/$HASH/evtx-end_newest) ENDNEXT=`date +%Y-%m-%d --date="$END 1 day"` ENDNEXT_COMPARE=$(date -d $ENDNEXT +%s) END_NEWEST_COMPARE=$(date -d $END_NEWEST +%s) From 508260bd468bbeafaa86f0b05b879df75a32ec70 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 19 Sep 2023 13:32:03 +0000 Subject: [PATCH 104/417] Use event.created for timestamp --- salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 index 688000fb7..52b6bae7a 100644 --- a/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 +++ b/salt/elasticsearch/files/ingest/.fleet_final_pipeline-1 @@ -80,6 
+80,7 @@ { "set": { "if": "ctx.network?.type == 'ipv6'", "override": true, "field": "destination.ipv6", "value": "true" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.dataset", "value": "import" } }, { "set": { "if": "ctx.tags.0 == 'import'", "override": true, "field": "data_stream.namespace", "value": "so" } }, + { "date": { "if": "ctx.event?.module == 'system'", "field": "event.created", "target_field": "@timestamp", "formats": ["yyyy-MM-dd'T'HH:mm:ss.SSSSSS'Z'"] } }, { "community_id":{ "if": "ctx.event?.dataset == 'endpoint.events.network'", "ignore_failure":true } }, { "remove": { "field": [ "message2", "type", "fields", "category", "module", "dataset", "event.dataset_temp", "dataset_tag_temp", "module_temp" ], "ignore_missing": true, "ignore_failure": true } } ], From 2e0ea3f37412b766773a7725ddd19e4d15df0590 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 19 Sep 2023 13:33:12 +0000 Subject: [PATCH 105/417] Set final pipeline --- salt/elasticsearch/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index cc2f5e1cd..91e5191f6 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -3689,6 +3689,7 @@ elasticsearch: refresh_interval: 30s number_of_shards: 1 number_of_replicas: 0 + final_pipeline: ".fleet_final_pipeline-1" composed_of: - agent-mappings - dtc-agent-mappings From 3fa3f83007e216f29579778595a66654d24518d4 Mon Sep 17 00:00:00 2001 From: Doug Burks Date: Wed, 20 Sep 2023 08:22:52 -0400 Subject: [PATCH 106/417] Update soc_sensoroni.yaml --- salt/sensoroni/soc_sensoroni.yaml | 54 +++++++++++++++---------------- 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/salt/sensoroni/soc_sensoroni.yaml b/salt/sensoroni/soc_sensoroni.yaml index eb63dbe25..db51da358 100644 --- a/salt/sensoroni/soc_sensoroni.yaml +++ b/salt/sensoroni/soc_sensoroni.yaml @@ -2,53 +2,53 @@ sensoroni: enabled: description: 
Enable or disable Sensoroni. advanced: True - helpLink: sensoroni.html + helpLink: grid.html config: analyze: enabled: description: Enable or disable the analyzer. advanced: True - helpLink: sensoroni.html + helpLink: cases.html timeout_ms: description: Timeout period for the analyzer. advanced: True - helpLink: sensoroni.html + helpLink: cases.html parallel_limit: description: Parallel limit for the analyzer. advanced: True - helpLink: sensoroni.html + helpLink: cases.html node_checkin_interval_ms: description: Interval in ms to checkin to the soc_host. advanced: True - helpLink: sensoroni.html + helpLink: grid.html node_description: description: Description of the specific node. - helpLink: sensoroni.html + helpLink: grid.html node: True forcedType: string sensoronikey: description: Shared key for sensoroni authentication. - helpLink: sensoroni.html + helpLink: grid.html global: True sensitive: True advanced: True soc_host: description: Host for sensoroni agents to connect to. - helpLink: sensoroni.html + helpLink: grid.html global: True advanced: True analyzers: emailrep: api_key: description: API key for the EmailRep analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the EmailRep analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True @@ -56,21 +56,21 @@ sensoroni: greynoise: api_key: description: API key for the GreyNoise analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: True advanced: True forcedType: string api_version: description: API version for the GreyNoise analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True forcedType: string base_url: description: Base URL for the GreyNoise analyzer. 
- helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True @@ -78,7 +78,7 @@ sensoroni: localfile: file_path: description: File path for the LocalFile analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True @@ -86,14 +86,14 @@ sensoroni: otx: api_key: description: API key for the OTX analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the OTX analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True @@ -101,14 +101,14 @@ sensoroni: pulsedive: api_key: description: API key for the Pulsedive analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the Pulsedive analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True @@ -116,14 +116,14 @@ sensoroni: spamhaus: lookup_host: description: Host to use for lookups. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True forcedType: string nameservers: description: Nameservers used for queries. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True @@ -131,35 +131,35 @@ sensoroni: urlscan: api_key: description: API key for the Urlscan analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the Urlscan analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True forcedType: string enabled: description: Analyzer enabled - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True forcedType: bool timeout: description: Timeout for the Urlscan analyzer. 
- helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True forcedType: int visibility: description: Type of visibility. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True @@ -167,14 +167,14 @@ sensoroni: virustotal: api_key: description: API key for the VirusTotal analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: True advanced: True forcedType: string base_url: description: Base URL for the VirusTotal analyzer. - helpLink: sensoroni.html + helpLink: cases.html global: False sensitive: False advanced: True From fa3a79a7875e03dc115cd8d93fb98c93764d532f Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 21 Sep 2023 09:41:44 -0400 Subject: [PATCH 107/417] Update soup to prune in background --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 1251f9a57..c3f9f29d4 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -434,8 +434,8 @@ post_to_2.4.10() { } post_to_2.4.20() { - echo "Pruning unused volumes" - docker volume prune -f + echo "Pruning unused docker volumes on all nodes - This process will run in the background." 
+ salt --async \* cmd.run "docker volume prune -f" POSTVERSION=2.4.20 } From eeeae08ec885ae8be7d56b96f1d52ca3530688e5 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 21 Sep 2023 18:39:06 +0000 Subject: [PATCH 108/417] /app/ to /app/dashboards/ --- salt/nginx/etc/nginx.conf | 25 +++++++++++++------------ 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index b2616e946..795663384 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -230,18 +230,19 @@ http { proxy_cookie_path /api/ /influxdb/api/; } - location /app/ { - auth_request /auth/sessions/whoami; - rewrite /app/(.*) /app/$1 break; - proxy_pass http://{{ GLOBALS.manager }}:5601/app/; - proxy_read_timeout 300; - proxy_connect_timeout 300; - proxy_set_header Host $host; - proxy_set_header X-Real-IP $remote_addr; - proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; - proxy_set_header Proxy ""; - proxy_set_header X-Forwarded-Proto $scheme; - + location /app/dashboards/ { + auth_request /auth/sessions/whoami; + rewrite /app/dashboards/(.*) /app/dashboards/$1 break; + proxy_pass http://{{ GLOBALS.manager }}:5601/app/; + proxy_read_timeout 300; + proxy_connect_timeout 300; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header Proxy ""; + proxy_set_header X-Forwarded-Proto $scheme; + } + location /kibana/ { auth_request /auth/sessions/whoami; rewrite /kibana/(.*) /$1 break; From c95af6b9922d7b8cbf13d9b2b951243594ca7c2d Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 25 Sep 2023 14:39:33 -0400 Subject: [PATCH 109/417] Add a note about testing analyzers outside of the Sensoroni Docker container --- salt/sensoroni/files/analyzers/README.md | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/salt/sensoroni/files/analyzers/README.md b/salt/sensoroni/files/analyzers/README.md index 8b1f44f29..a75799558 
100644 --- a/salt/sensoroni/files/analyzers/README.md +++ b/salt/sensoroni/files/analyzers/README.md @@ -141,7 +141,6 @@ Additionally, to support airgapped users, the dependency packages themselves, an pip download -r /requirements.txt -d /source-packages ``` - ### Analyzer Architecture The Sensoroni Docker container is responsible for executing analyzers. Only the manager's Sensoroni container will process analyzer jobs. Other nodes in the grid, such as sensors and search nodes, will not be assigned analyzer jobs. @@ -154,6 +153,12 @@ The analyzer itself will only run when a user in SOC enqueues an analyzer job, s python -m urlhaus '{"artifactType":"url","value":"https://bigbadbotnet.invalid",...}' ``` +To manually test an analyzer outside of the Sensoroni Docker container, use a command similar to the following: + +```bash +PYTHONPATH=. python urlhaus/urlhaus.py '{"artifactType":"url","value":"https://bigbadbotnet.invalid",...}' +``` + It is up to each analyzer to determine whether the provided input is compatible with that analyzer. This is assisted by the analyzer metadata, as described earlier in this document, with the use of the `supportedTypes` list. Once the analyzer completes its functionality, it must terminate promptly. See the following sections for more details on expected internal behavior of the analyzer. 
From 7cb9b5f2577b92cbcd8d908050eaffb40812807d Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 25 Sep 2023 14:41:20 -0400 Subject: [PATCH 110/417] Add the blank line that was removed from the previous commit --- salt/sensoroni/files/analyzers/README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/sensoroni/files/analyzers/README.md b/salt/sensoroni/files/analyzers/README.md index a75799558..19335a545 100644 --- a/salt/sensoroni/files/analyzers/README.md +++ b/salt/sensoroni/files/analyzers/README.md @@ -141,6 +141,7 @@ Additionally, to support airgapped users, the dependency packages themselves, an pip download -r /requirements.txt -d /source-packages ``` + ### Analyzer Architecture The Sensoroni Docker container is responsible for executing analyzers. Only the manager's Sensoroni container will process analyzer jobs. Other nodes in the grid, such as sensors and search nodes, will not be assigned analyzer jobs. From e25d1c0ff34a37a895c2725dc1e247141b5d6e59 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 26 Sep 2023 10:01:21 -0400 Subject: [PATCH 111/417] so-salt-minion-check is jinja template --- salt/common/tools/{sbin => sbin_jinja}/so-salt-minion-check | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename salt/common/tools/{sbin => sbin_jinja}/so-salt-minion-check (100%) diff --git a/salt/common/tools/sbin/so-salt-minion-check b/salt/common/tools/sbin_jinja/so-salt-minion-check similarity index 100% rename from salt/common/tools/sbin/so-salt-minion-check rename to salt/common/tools/sbin_jinja/so-salt-minion-check From 0bba68769bf6602dfeb8a1484adfa246c17d1fd0 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 26 Sep 2023 14:05:12 +0000 Subject: [PATCH 112/417] Make scan.pe.image_version type of 'float' --- .../templates/component/so/so-scan-mappings.json | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/templates/component/so/so-scan-mappings.json 
b/salt/elasticsearch/templates/component/so/so-scan-mappings.json index 8ddbe6077..008a6ab10 100644 --- a/salt/elasticsearch/templates/component/so/so-scan-mappings.json +++ b/salt/elasticsearch/templates/component/so/so-scan-mappings.json @@ -20,7 +20,10 @@ "type": "float" } } - } + }, + "image_version": { + "type": "float" + } } }, "elf": { From 2abf434ebefb502b5bf2abfcf8c3cec3b173cde4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 26 Sep 2023 10:56:20 -0400 Subject: [PATCH 113/417] create snapshots of default, local salt and pillars during soup. rsync soup with --delete --- salt/common/tools/sbin/so-common | 6 ++---- salt/manager/tools/sbin/soup | 12 ++++++++++++ 2 files changed, 14 insertions(+), 4 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 03b19d756..0dfb19bbe 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -154,13 +154,11 @@ check_salt_minion_status() { return $status } - - copy_new_files() { # Copy new files over to the salt dir cd $UPDATE_DIR - rsync -a salt $DEFAULT_SALT_DIR/ - rsync -a pillar $DEFAULT_SALT_DIR/ + rsync -a salt $DEFAULT_SALT_DIR/ --delete + rsync -a pillar $DEFAULT_SALT_DIR/ --delete chown -R socore:socore $DEFAULT_SALT_DIR/ chmod 755 $DEFAULT_SALT_DIR/pillar/firewall/addfirewall.sh cd /tmp diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 1251f9a57..8ec9f9bad 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -171,6 +171,13 @@ airgap_update_dockers() { fi } +backup_old_states_pillars() { + + tar czf /nsm/backup/$(echo $INSTALLEDVERSION)_$(date +%Y%m%d-%H%M%S)_soup_default_states_pillars.tar.gz /opt/so/saltstack/default/ + tar czf /nsm/backup/$(echo $INSTALLEDVERSION)_$(date +%Y%m%d-%H%M%S)_soup_local_states_pillars.tar.gz /opt/so/saltstack/local/ + +} + update_registry() { docker stop so-dockerregistry docker rm so-dockerregistry @@ -789,6 +796,7 @@ main() { if [ "$is_hotfix" == 
"true" ]; then echo "Applying $HOTFIXVERSION hotfix" + backup_old_states_pillars copy_new_files apply_hotfix echo "Hotfix applied" @@ -845,6 +853,10 @@ main() { update_centos_repo fi + echo "" + echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/" + backup_old_states_pillars + echo "" echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR." copy_new_files From 48801da44e9df1c589a165fa42f7778bfed26b93 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 26 Sep 2023 18:12:20 -0400 Subject: [PATCH 114/417] log check tool initial --- salt/common/tools/sbin/so-log-check | 174 ++++++++++++++++++++++++++++ 1 file changed, 174 insertions(+) create mode 100755 salt/common/tools/sbin/so-log-check diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check new file mode 100755 index 000000000..6a3ca9876 --- /dev/null +++ b/salt/common/tools/sbin/so-log-check @@ -0,0 +1,174 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +. 
/usr/sbin/so-common + +RECENT_LOG_LINES=200 +EXCLUDE_STARTUP_ERRORS=N +EXCLUDE_FALSE_POSITIVE_ERRORS=N +EXCLUDE_KNOWN_ERRORS=N + +while [[ $# -gt 0 ]]; do + case $1 in + --exclude-connection-errors) + EXCLUDE_STARTUP_ERRORS=Y + ;; + --exclude-false-positives) + EXCLUDE_FALSE_POSITIVE_ERRORS=Y + ;; + --exclude-known-errors) + EXCLUDE_KNOWN_ERRORS=Y + ;; + --unknown) + EXCLUDE_STARTUP_ERRORS=Y + EXCLUDE_FALSE_POSITIVE_ERRORS=Y + EXCLUDE_KNOWN_ERRORS=Y + ;; + --recent-log-lines) + shift + RECENT_LOG_LINES=$1 + ;; + *) + echo "Usage: $0 [options]" + echo "" + echo "where options are:" + echo " --recent-log-lines N looks at the most recent N log lines per file or container; defaults to 200" + echo " --exclude-connection-errors exclude errors caused by a recent server or container restart" + echo " --exclude-false-positives exclude logs that are not actual errors but contain the error string" + echo " --exclude-known-errors exclude errors that are known and non-critical issues" + echo " --unknown exclude everthing mentioned above; only show unknown errors" + echo "" + echo "A non-zero return value indicates errors were found" + exit 1 + ;; + esac + shift +done + +echo "Security Onion Log Check - $(date)" +echo "-------------------------------------------" +echo "" +echo "- RECENT_LOG_LINES: $RECENT_LOG_LINES" +echo "- EXCLUDE_STARTUP_ERRORS: $EXCLUDE_STARTUP_ERRORS" +echo "- EXCLUDE_FALSE_POSITIVE_ERRORS: $EXCLUDE_FALSE_POSITIVE_ERRORS" +echo "- EXCLUDE_KNOWN_ERRORS: $EXCLUDE_KNOWN_ERRORS" +echo "" + +function status() { + header "$1" +} + +function exclude_container() { + name=$1 + + exclude_id=$(docker ps | grep "$name" | awk '{print $1}') + if [[ -n "$exclude_id" ]]; then + CONTAINER_IDS=$(echo $CONTAINER_IDS | sed -e "s/$exclude_id//g") + return $? + fi + return $? 
+} + +function exclude_log() { + name=$1 + + LOG_FILES=$(echo "$LOG_FILES" | sed -e "s/$name//g") +} + +function check_for_errors() { + if cat /tmp/log_check | grep -i error | grep -vEi "$EXCLUDED_ERRORS"; then + RESULT=1 + fi +} + +EXCLUDED_ERRORS="__LOG_CHECK_PLACEHOLDER_EXCLUSION__" + +if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|database is locked" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|econnreset" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unreachable" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|request.py" # server not yet ready (python stack output) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|httperror" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|servfail" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|connection refused" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing shards" # server not yet ready +fi + +if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|elastalert_status_error" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|elastalert_error.json" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error: '0'" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|errors_index" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noerror" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|emerging-all.rules" # false positive (error in rulename) +fi + +if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|eof" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" + 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|bookkeeper" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noindices" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to start transient scope" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # request successful, contained error string in content + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so-user.lock exists" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|systemd-run" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|retcode: 1" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|telemetry-task" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|redisqueue" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fleet_detail_query" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|num errors=0" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|provisioning/alerting" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|provisioning/notifiers" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|provisoning/plugins" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|active-responses.log" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|scanentropy" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|integration policy" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|blob unknown" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|token required" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|zeekcaptureloss" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unable to create detection" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error installing new prebuilt rules" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parent.error" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # example test data + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # setup in progress, influxdb not yet setup + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" +fi + +RESULT=0 + +# Check Security Onion container stdout/stderr logs +CONTAINER_IDS=$(docker ps -q) +exclude_container so-kibana +exclude_container so-idstools + +for container_id in $CONTAINER_IDS; do + status "Checking container $container_id" + docker logs -n $RECENT_LOG_LINES $container_id 
> /tmp/log_check 2>&1 + check_for_errors +done + +# Check Security Onion related log files +LOG_FILES=$(find /opt/so/log/ /nsm -name \*.log) +exclude_log "\s?.*kibana.log" +LOG_FILES="$LOG_FILES /var/log/cron" + +for log_file in $LOG_FILES; do + status "Checking log file $log_file" + tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check + check_for_errors +done + +exit $RESULT \ No newline at end of file From 2c8d413f168fe2dedcf9e7eb91dfc806377ee3b5 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 26 Sep 2023 18:14:37 -0400 Subject: [PATCH 115/417] log check tool initial --- salt/common/tools/sbin/so-log-check | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 6a3ca9876..752a6d51e 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -40,7 +40,7 @@ while [[ $# -gt 0 ]]; do echo " --exclude-connection-errors exclude errors caused by a recent server or container restart" echo " --exclude-false-positives exclude logs that are not actual errors but contain the error string" echo " --exclude-known-errors exclude errors that are known and non-critical issues" - echo " --unknown exclude everthing mentioned above; only show unknown errors" + echo " --unknown exclude everything mentioned above; only show unknown errors" echo "" echo "A non-zero return value indicates errors were found" exit 1 From 9c854a13ccf44b56163ed90d9ae8e26d163a0ff2 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 26 Sep 2023 21:41:44 -0400 Subject: [PATCH 116/417] skip zeek spool logs due to test data false positives --- salt/common/tools/sbin/so-log-check | 32 ++++++++++++++++++++++------- 1 file changed, 25 insertions(+), 7 deletions(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 752a6d51e..6169e9720 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -38,7 +38,7 @@ 
while [[ $# -gt 0 ]]; do echo "where options are:" echo " --recent-log-lines N looks at the most recent N log lines per file or container; defaults to 200" echo " --exclude-connection-errors exclude errors caused by a recent server or container restart" - echo " --exclude-false-positives exclude logs that are not actual errors but contain the error string" + echo " --exclude-false-positives exclude logs that are known false positives" echo " --exclude-known-errors exclude errors that are known and non-critical issues" echo " --unknown exclude everything mentioned above; only show unknown errors" echo "" @@ -76,7 +76,8 @@ function exclude_container() { function exclude_log() { name=$1 - LOG_FILES=$(echo "$LOG_FILES" | sed -e "s/$name//g") + cat /tmp/log_check_files | grep -v $name > /tmp/log_check_files.new + mv /tmp/log_check_files.new /tmp/log_check_files } function check_for_errors() { @@ -97,8 +98,10 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|request.py" # server not yet ready (python stack output) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|httperror" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|servfail" # server not yet ready - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|connection refused" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|connect" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing shards" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to send metrics" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|influxsize kbytes" # server not yet ready (telegraf) fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then @@ -107,11 +110,15 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error: '0'" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|errors_index" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noerror" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|deprecated" # false positive (playbook) + 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|could cause errors" # false positive (playbook) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|id.orig_h" # false positive (zeek test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|emerging-all.rules" # false positive (error in rulename) fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|eof" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" @@ -161,14 +168,25 @@ for container_id in $CONTAINER_IDS; do done # Check Security Onion related log files -LOG_FILES=$(find /opt/so/log/ /nsm -name \*.log) -exclude_log "\s?.*kibana.log" -LOG_FILES="$LOG_FILES /var/log/cron" +find /opt/so/log/ /nsm -name \*.log > /tmp/log_check_files +echo "/var/log/cron" >> /tmp/log_check_files +exclude_log "kibana.log" +exclude_log "spool" -for log_file in $LOG_FILES; do +for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" tail -n $RECENT_LOG_LINES $log_file > /tmp/log_check check_for_errors done +# Cleanup temp files +rm -f /tmp/log_check_files +rm -f /tmp/log_check + +if [[ $RESULT -eq 0 ]]; then + echo -e "\nResult: No errors found" +else + echo -e "\nResult: One or more errors found" +fi + exit $RESULT \ No newline at end of file From b47d915cb6318bfa8af3a29763fe38015764ec2f Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 Sep 2023 09:30:19 -0400 Subject: [PATCH 117/417] don't inspect imported zeek output --- salt/common/tools/sbin/so-log-check | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 6169e9720..621f0027a 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -101,7 +101,12 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|connect" # server not 
yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing shards" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to send metrics" # server not yet ready - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|influxsize kbytes" # server not yet ready (telegraf) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|influxsize kbytes" # server not yet ready (telegraf waiting on influx) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|expected field at" # server not yet ready (telegraf waiting on health data) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cached the public key" # server not yet ready (salt minion waiting on key acceptance) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no ingest nodes" # server not yet ready (logstash waiting on elastic) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to poll" # server not yet ready (sensoroni waiting on soc) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|minions returned with non" # server not yet ready (salt waiting on minions) fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then @@ -110,14 +115,18 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error: '0'" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|errors_index" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noerror" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error-template" # false positive (elastic templates) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|deprecated" # false positive (playbook) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|could cause errors" # false positive (playbook) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|id.orig_h" # false positive (zeek test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|emerging-all.rules" # false positive (error in rulename) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query) fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|eof" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" @@ -146,7 +155,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unable to create detection" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error installing new prebuilt rules" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parent.error" - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # example test data EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # setup in progress, influxdb not yet setup @@ -172,6 +181,7 @@ find /opt/so/log/ /nsm -name \*.log > /tmp/log_check_files echo "/var/log/cron" >> /tmp/log_check_files exclude_log "kibana.log" exclude_log "spool" +exclude_log "import" for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" From 05e7c32cf9f6d9246e458d33d0b13aa43b337d06 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 27 Sep 2023 10:08:08 -0400 Subject: [PATCH 118/417] remove duplicate filecheck_run cron --- salt/strelka/filestream/config.sls | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/salt/strelka/filestream/config.sls b/salt/strelka/filestream/config.sls index 833a08505..0f9f38914 100644 --- a/salt/strelka/filestream/config.sls +++ b/salt/strelka/filestream/config.sls @@ -108,6 +108,11 @@ filecheck_stdout.log: {% if GLOBALS.md_engine == 'ZEEK' %} +remove_filecheck_run: + cron.absent: + - identifier: filecheck_run + - user: socore + filecheck_run_socore: cron.present: - name: 'ps -ef | grep filecheck | grep -v grep > /dev/null 2>&1 || python3 /opt/so/conf/strelka/filecheck >> /opt/so/log/strelka/filecheck_stdout.log 2>&1 &' @@ -121,6 +126,11 @@ remove_filecheck_run_suricata: {% elif GLOBALS.md_engine == 'SURICATA'%} +remove_filecheck_run: + 
cron.absent: + - identifier: filecheck_run + - user: suricata + filecheck_run_suricata: cron.present: - name: 'ps -ef | grep filecheck | grep -v grep > /dev/null 2>&1 || python3 /opt/so/conf/strelka/filecheck >> /opt/so/log/strelka/filecheck_stdout.log 2>&1 &' From c4fea9cb9da5a5044ad1c7be9aeca417b4e6ab96 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 27 Sep 2023 11:03:58 -0400 Subject: [PATCH 119/417] Update nginx.conf --- salt/nginx/etc/nginx.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/nginx/etc/nginx.conf b/salt/nginx/etc/nginx.conf index 795663384..3ef0c5c1f 100644 --- a/salt/nginx/etc/nginx.conf +++ b/salt/nginx/etc/nginx.conf @@ -8,6 +8,7 @@ worker_processes auto; error_log /var/log/nginx/error.log; pid /run/nginx.pid; +user nobody; include /usr/share/nginx/modules/*.conf; From 87cc389088eb0f1886c73610f70e3701eb625d20 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 Sep 2023 15:36:13 -0400 Subject: [PATCH 120/417] deb OS doesn't use /var/log/cron, skip --- salt/common/tools/sbin/so-log-check | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 621f0027a..d377d0236 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -178,7 +178,9 @@ done # Check Security Onion related log files find /opt/so/log/ /nsm -name \*.log > /tmp/log_check_files -echo "/var/log/cron" >> /tmp/log_check_files +if [[ -f /var/log/cron ]]; then + echo "/var/log/cron" >> /tmp/log_check_files +fi exclude_log "kibana.log" exclude_log "spool" exclude_log "import" From f094b1162d7c23cd5bb2d98d7e34fb2f7b6afecf Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 27 Sep 2023 15:48:05 -0400 Subject: [PATCH 121/417] Update defaults.yaml --- salt/zeek/defaults.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/zeek/defaults.yaml b/salt/zeek/defaults.yaml index 8e6814b2e..783c38820 100644 --- 
a/salt/zeek/defaults.yaml +++ b/salt/zeek/defaults.yaml @@ -8,9 +8,9 @@ zeek: buffer: 128*1024*1024 zeekctl: MailTo: root@localhost - MailConnectionSummary: 1 + MailConnectionSummary: 0 MinDiskSpace: 5 - MailHostUpDown: 1 + MailHostUpDown: 0 LogRotationInterval: 3600 LogExpireInterval: 0 StatsLogEnable: 1 From 4666916077db773dfb10d94e0decb85620d0c453 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 Sep 2023 15:48:52 -0400 Subject: [PATCH 122/417] ignore generic python stack trace log lines of code, rely on actual error messages --- salt/common/tools/sbin/so-log-check | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index d377d0236..9deeba1cd 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -125,6 +125,8 @@ fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|eof" + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|raise error" # redis/python generic stack line, rely on other lines for actual error + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail(error)" # redis/python generic stack line, rely on other lines for actual error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook From 2427344dca2fc963b9b9d608f7f665141610d9aa Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 27 Sep 2023 15:58:58 -0400 Subject: [PATCH 123/417] Update defaults.yaml --- salt/zeek/defaults.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/zeek/defaults.yaml b/salt/zeek/defaults.yaml index 783c38820..4435670a2 100644 --- a/salt/zeek/defaults.yaml +++ b/salt/zeek/defaults.yaml @@ -28,7 +28,6 @@ zeek: - misc/loaded-scripts - tuning/defaults - misc/capture-loss - - misc/stats - frameworks/software/vulnerable - frameworks/software/version-changes - protocols/ftp/software From 
2fb73cd51621a8dea6a6f8d31597273e05ff12fb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 27 Sep 2023 16:07:38 -0400 Subject: [PATCH 124/417] Update defaults.yaml --- salt/telegraf/defaults.yaml | 8 -------- 1 file changed, 8 deletions(-) diff --git a/salt/telegraf/defaults.yaml b/salt/telegraf/defaults.yaml index a87fa952b..ab8679e57 100644 --- a/salt/telegraf/defaults.yaml +++ b/salt/telegraf/defaults.yaml @@ -11,7 +11,6 @@ telegraf: quiet: 'false' scripts: eval: - - beatseps.sh - checkfiles.sh - influxdbsize.sh - oldpcap.sh @@ -23,7 +22,6 @@ telegraf: - zeekcaptureloss.sh - zeekloss.sh standalone: - - beatseps.sh - checkfiles.sh - eps.sh - influxdbsize.sh @@ -36,13 +34,11 @@ telegraf: - zeekcaptureloss.sh - zeekloss.sh manager: - - beatseps.sh - influxdbsize.sh - raid.sh - redis.sh - sostatus.sh managersearch: - - beatseps.sh - eps.sh - influxdbsize.sh - raid.sh @@ -51,7 +47,6 @@ telegraf: import: - sostatus.sh sensor: - - beatseps.sh - checkfiles.sh - oldpcap.sh - raid.sh @@ -61,7 +56,6 @@ telegraf: - zeekcaptureloss.sh - zeekloss.sh heavynode: - - beatseps.sh - checkfiles.sh - eps.sh - oldpcap.sh @@ -75,12 +69,10 @@ telegraf: idh: - sostatus.sh searchnode: - - beatseps.sh - eps.sh - raid.sh - sostatus.sh receiver: - - beatseps.sh - eps.sh - raid.sh - redis.sh From 039d5ae9aa4a65e8e8bce9d5e45bfd51682c0e84 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 27 Sep 2023 16:09:27 -0400 Subject: [PATCH 125/417] Delete salt/telegraf/scripts/beatseps.sh --- salt/telegraf/scripts/beatseps.sh | 38 ------------------------------- 1 file changed, 38 deletions(-) delete mode 100644 salt/telegraf/scripts/beatseps.sh diff --git a/salt/telegraf/scripts/beatseps.sh b/salt/telegraf/scripts/beatseps.sh deleted file mode 100644 index 5f3db53f8..000000000 --- a/salt/telegraf/scripts/beatseps.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/bash -# -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license 
agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -# if this script isn't already running -if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then - - PREVCOUNTFILE='/tmp/beatseps.txt' - EVENTCOUNTCURRENT="$(curl -s localhost:5066/stats | jq '.libbeat.output.events.acked')" - FAILEDEVENTCOUNT="$(curl -s localhost:5066/stats | jq '.libbeat.output.events.failed')" - - if [ ! -z "$EVENTCOUNTCURRENT" ]; then - - if [ -f "$PREVCOUNTFILE" ]; then - EVENTCOUNTPREVIOUS=`cat $PREVCOUNTFILE` - else - echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE - exit 0 - fi - - echo "${EVENTCOUNTCURRENT}" > $PREVCOUNTFILE - # the division by 30 is because the agent interval is 30 seconds - EVENTS=$(((EVENTCOUNTCURRENT - EVENTCOUNTPREVIOUS)/30)) - if [ "$EVENTS" -lt 0 ]; then - EVENTS=0 - fi - - echo "fbstats eps=${EVENTS%%.*},failed=$FAILEDEVENTCOUNT" - fi - -fi - -exit 0 From 24def3a196efc12506f11de8c32e5d52cc6bd24b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 Sep 2023 16:50:01 -0400 Subject: [PATCH 126/417] ignore generic python stack trace log lines of code, rely on actual error messages --- salt/common/tools/sbin/so-log-check | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 9deeba1cd..f89995065 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -107,6 +107,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no ingest nodes" # server not yet ready (logstash waiting on elastic) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to poll" # server not yet ready (sensoroni waiting on soc) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|minions returned with non" # server not yet ready (salt waiting on minions) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # server not yet ready 
(influxdb not yet setup) fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then @@ -121,23 +122,25 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|id.orig_h" # false positive (zeek test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|emerging-all.rules" # false positive (error in rulename) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content) fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|eof" - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|raise error" # redis/python generic stack line, rely on other lines for actual error - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail(error)" # redis/python generic stack line, rely on other lines for actual error + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|raise error" # redis/python generic stack line, rely on other lines for actual error + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|bookkeeper" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noindices" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to start transient scope" - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # request successful, contained error string in content 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so-user.lock exists" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|systemd-run" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|retcode: 1" @@ -159,8 +162,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|parent.error" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|req.LocalMeta.host.ip" # known issue in GH EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sendmail" # zeek - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # example test data - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # setup in progress, influxdb not yet setup EXCLUDED_ERRORS="$EXCLUDED_ERRORS|stats.log" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context deadline exceeded" fi From 76c0b881ff3e6a4011e71c2f6ca9941f290349bb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 27 Sep 2023 18:20:50 -0400 Subject: [PATCH 127/417] exclude import from snapshotting previous version pillars and states --- salt/manager/tools/sbin/soup | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 8c6c2b237..8259bf6ab 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -518,7 +518,7 @@ up_to_2.4.10() { } up_to_2.4.20() { - echo "Nothing to do for 2.4.20" + echo "Preupgrade soup changes for 2.4.20" INSTALLEDVERSION=2.4.20 } @@ -796,7 +796,10 @@ main() { if [ "$is_hotfix" == "true" ]; then echo "Applying $HOTFIXVERSION hotfix" - backup_old_states_pillars + # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars + if [[ ! 
"$MINIONID" =~ "_import" ]]; then + backup_old_states_pillars + fi copy_new_files apply_hotfix echo "Hotfix applied" @@ -853,9 +856,12 @@ main() { update_centos_repo fi - echo "" - echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/" - backup_old_states_pillars + # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars + if [[ ! "$MINIONID" =~ "_import" ]]; then + echo "" + echo "Creating snapshots of default and local Salt states and pillars and saving to /nsm/backup/" + backup_old_states_pillars + fi echo "" echo "Copying new Security Onion code from $UPDATE_DIR to $DEFAULT_SALT_DIR." From d72e4ae97d7514cf7fe8b3ade06279079f2504aa Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 Sep 2023 18:39:23 -0400 Subject: [PATCH 128/417] ignore soctopus errors --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index f89995065..f9393ce8a 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -135,6 +135,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|forbidden" # playbook EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From 419acab48aea966593207cb396372b6daecb5833 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 27 Sep 2023 19:17:13 -0400 Subject: [PATCH 129/417] revert up_to_2.4.20 --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup 
b/salt/manager/tools/sbin/soup index 8259bf6ab..960c50f31 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -518,7 +518,7 @@ up_to_2.4.10() { } up_to_2.4.20() { - echo "Preupgrade soup changes for 2.4.20" + echo "Nothing to do for 2.4.20" INSTALLEDVERSION=2.4.20 } From 49115cde55027eba0fe73cf95acd46f1714ec5a3 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 Sep 2023 19:55:46 -0400 Subject: [PATCH 130/417] logcheck improvements --- salt/common/tools/sbin/so-log-check | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index f9393ce8a..e75c9cd60 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -136,6 +136,9 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip database update" # airgap can't update GeoIP DB + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From 9ee64f93ca25384a3e18fb9308981aa45f7dcdfc Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 Sep 2023 20:17:59 -0400 Subject: [PATCH 131/417] logcheck improvements --- salt/common/tools/sbin/so-log-check | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index e75c9cd60..b4b40a90b 100755 --- a/salt/common/tools/sbin/so-log-check +++ 
b/salt/common/tools/sbin/so-log-check @@ -119,6 +119,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error-template" # false positive (elastic templates) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|deprecated" # false positive (playbook) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|could cause errors" # false positive (playbook) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_error.yml" # false positive (playbook) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|id.orig_h" # false positive (zeek test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|emerging-all.rules" # false positive (error in rulename) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query) @@ -139,6 +140,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip database update" # airgap can't update GeoIP DB EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From 621da9e7e319df596b343847480be8ca4fce3d36 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 27 Sep 2023 22:20:54 -0400 Subject: [PATCH 132/417] more exclusions --- salt/common/tools/sbin/so-log-check | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index b4b40a90b..c6a966385 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -101,6 +101,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|connect" # server not yet ready 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing shards" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to send metrics" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|broken pipe" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|influxsize kbytes" # server not yet ready (telegraf waiting on influx) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|expected field at" # server not yet ready (telegraf waiting on health data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cached the public key" # server not yet ready (salt minion waiting on key acceptance) @@ -118,6 +119,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noerror" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error-template" # false positive (elastic templates) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|deprecated" # false positive (playbook) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|windows" # false positive (playbook) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|could cause errors" # false positive (playbook) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_error.yml" # false positive (playbook) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|id.orig_h" # false positive (zeek test data) @@ -129,7 +131,7 @@ fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|eof" - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|raise error" # redis/python generic stack line, rely on other lines for actual error + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|raise" # redis/python generic stack line, rely on other lines for actual error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fail\\(error\\)" # redis/python generic stack line, rely on other lines for actual error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|urlerror" # idstools connection timeout EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeouterror" # idstools connection timeout From 89a9c30cc89371979ed8ea50b12a2e00ad978158 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 28 Sep 2023 08:27:31 -0400 Subject: [PATCH 133/417] exclude known issues --- salt/common/tools/sbin/so-log-check | 10 
+++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index c6a966385..865846fac 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -102,6 +102,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing shards" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to send metrics" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|broken pipe" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout exceeded" # server not yet ready (telegraf waiting on elasticsearch) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|influxsize kbytes" # server not yet ready (telegraf waiting on influx) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|expected field at" # server not yet ready (telegraf waiting on health data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|cached the public key" # server not yet ready (salt minion waiting on key acceptance) @@ -117,6 +118,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error: '0'" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|errors_index" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noerror" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fs_errors" # false positive (suricata stats) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error-template" # false positive (elastic templates) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|deprecated" # false positive (playbook) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|windows" # false positive (playbook) @@ -143,6 +145,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error + 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" @@ -192,9 +195,10 @@ find /opt/so/log/ /nsm -name \*.log > /tmp/log_check_files if [[ -f /var/log/cron ]]; then echo "/var/log/cron" >> /tmp/log_check_files fi -exclude_log "kibana.log" -exclude_log "spool" -exclude_log "import" +exclude_log "kibana.log" # kibana error logs are too verbose with large varieties of errors most of which are temporary +exclude_log "spool" # disregard zeek analyze logs +exclude_log "import" # disregard imported test data the contains error strings +exclude_log "update.log" # ignore playbook updates due to known issues for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" From 202eb7e8765ff239734bcecd0c7327bb40dec33a Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 28 Sep 2023 09:16:56 -0400 Subject: [PATCH 134/417] Exclude known_certs --- salt/elasticfleet/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 979e795f7..6737df17d 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -15,6 +15,7 @@ elasticfleet: - cluster - console - ecat_arp_info + - known_certs - known_hosts - known_services - loaded_scripts From ee45fc31a2894137a82a2e90a6e3fb2aff39c2ba Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 28 Sep 2023 11:04:16 -0400 Subject: [PATCH 135/417] Delete salt/strelka/tools/sbin_jinja/so-yara-download --- .../strelka/tools/sbin_jinja/so-yara-download | 21 ------------------- 1 file changed, 21 deletions(-) delete mode 100644 salt/strelka/tools/sbin_jinja/so-yara-download diff --git a/salt/strelka/tools/sbin_jinja/so-yara-download b/salt/strelka/tools/sbin_jinja/so-yara-download deleted file mode 100644 index a8087173c..000000000 --- 
a/salt/strelka/tools/sbin_jinja/so-yara-download +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/bash -NOROOT=1 -. /usr/sbin/so-common - -{%- set proxy = salt['pillar.get']('manager:proxy') %} - -# Download the rules from the internet -{%- if proxy %} -export http_proxy={{ proxy }} -export https_proxy={{ proxy }} -export no_proxy=salt['pillar.get']('manager:no_proxy') -{%- endif %} - -mkdir -p /tmp/yara -cd /tmp/yara -git clone https://github.com/Security-Onion-Solutions/securityonion-yara.git -mkdir -p /nsm/rules/yara -rsync -shav --progress /tmp/yara/securityonion-yara/yara /nsm/rules/ -cd /tmp -rm -rf /tmp/yara - From a77a53f20b3bbdd6d6b7965e6bb4e65a146ae154 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 28 Sep 2023 11:10:17 -0400 Subject: [PATCH 136/417] Update init.sls --- salt/manager/init.sls | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index b9d2d3ba9..146bca126 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -26,6 +26,15 @@ repo_log_dir: - user - group +yara_log_dir: + file.directory: + - name: /opt/so/log/yarasync + - user: socore + - group: socore + - recurse: + - user + - group + repo_conf_dir: file.directory: - name: /opt/so/conf/reposync From 7a21b7903dfbdf57518ba2a667b5aa14f4c8f640 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 28 Sep 2023 11:46:43 -0400 Subject: [PATCH 137/417] Fix manager cron logic --- salt/manager/init.sls | 92 +++++++++++++++++++------------------------ 1 file changed, 40 insertions(+), 52 deletions(-) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 146bca126..55badaf10 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -61,21 +61,23 @@ manager_sbin: - group: 939 - file_mode: 755 -#manager_sbin_jinja: -# file.recurse: -# - name: /usr/sbin -# - source: salt://manager/tools/sbin_jinja -# - user: 939 -# - group: 939 -# - file_mode: 755 -# - template: jinja +yara_update_scripts: + file.recurse: + - name: /usr/sbin/ + - 
source: salt://manager/tools/sbin_jinja/ + - user: socore + - group: socore + - file_mode: 755 + - template: jinja + - defaults: + EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }} so-repo-sync: - {% if MANAGERMERGED.reposync.enabled %} + {% if MANAGERMERGED.reposync.enabled or ! GLOBALS.airgap %} cron.present: - {% else %} + {% else %} cron.absent: - {% endif %} + {% endif %} - user: socore - name: '/usr/sbin/so-repo-sync >> /opt/so/log/reposync/reposync.log 2>&1' - identifier: so-repo-sync @@ -91,7 +93,15 @@ socore_own_saltstack: - user - group -{% if STRELKAMERGED.rules.enabled %} +rules_dir: + file.directory: + - name: /nsm/rules/yara + - user: socore + - group: socore + - makedirs: True + +{% if STRELKAMERGED.rules.enabled %} + strelkarepos: file.managed: - name: /opt/so/conf/strelka/repos.txt @@ -100,67 +110,45 @@ strelkarepos: - defaults: STRELKAREPOS: {{ STRELKAMERGED.rules.repos }} - makedirs: True -{% endif %} - -yara_update_scripts: - file.recurse: - - name: /usr/sbin/ - - source: salt://manager/tools/sbin_jinja/ - - user: socore - - group: socore - - file_mode: 755 - - template: jinja - - defaults: - EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }} - -rules_dir: - file.directory: - - name: /nsm/rules/yara - - user: socore - - group: socore - - makedirs: True - -{% if GLOBALS.airgap %} -remove_strelka-yara-download: - cron.absent: - - user: socore - - identifier: strelka-yara-download strelka-yara-update: + {% if MANAGERMERGED.reposync.enabled or ! 
GLOBALS.airgap %} cron.present: + {% else %} + cron.absent: + {% endif %} - user: socore - - name: '/usr/sbin/so-yara-update >> /nsm/strelka/log/yara-update.log 2>&1' + - name: '/usr/sbin/so-yara-update >> /opt/so/log/yarasync/yara-update.log 2>&1' - identifier: strelka-yara-update - hour: '7' - minute: '1' -update_yara_rules: - cmd.run: - - name: /usr/sbin/so-yara-update - - onchanges: - - file: yara_update_scripts -{% else %} -remove_strelka-yara-update: - cron.absent: - - user: socore - - identifier: strelka-yara-update - strelka-yara-download: + {% if MANAGERMERGED.reposync.enabled or ! GLOBALS.airgap %} cron.present: + {% else %} + cron.absent: + {% endif %} - user: socore - name: '/usr/sbin/so-yara-download >> /nsm/strelka/log/yara-download.log 2>&1' - identifier: strelka-yara-download - hour: '7' - minute: '1' +{% if ! GLOBALS.airgap %} +update_yara_rules: + cmd.run: + - name: /usr/sbin/so-yara-update + - onchanges: + - file: yara_update_scripts + download_yara_rules: cmd.run: - name: /usr/sbin/so-yara-download - onchanges: - file: yara_update_scripts -{% endif %} - - +{% endif %} +{% endif %} {% else %} {{sls}}_state_not_allowed: From 5040df7551474d521fef76a2872913f046b8fdc5 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 28 Sep 2023 12:32:40 -0400 Subject: [PATCH 138/417] Fix manager cron logic --- salt/manager/init.sls | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/manager/init.sls b/salt/manager/init.sls index 55badaf10..68d51c2af 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -73,7 +73,7 @@ yara_update_scripts: EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }} so-repo-sync: - {% if MANAGERMERGED.reposync.enabled or ! GLOBALS.airgap %} + {% if MANAGERMERGED.reposync.enabled or not GLOBALS.airgap %} cron.present: {% else %} cron.absent: @@ -112,7 +112,7 @@ strelkarepos: - makedirs: True strelka-yara-update: - {% if MANAGERMERGED.reposync.enabled or ! 
GLOBALS.airgap %} + {% if MANAGERMERGED.reposync.enabled or not GLOBALS.airgap %} cron.present: {% else %} cron.absent: @@ -124,18 +124,18 @@ strelka-yara-update: - minute: '1' strelka-yara-download: - {% if MANAGERMERGED.reposync.enabled or ! GLOBALS.airgap %} + {% if MANAGERMERGED.reposync.enabled or not GLOBALS.airgap %} cron.present: {% else %} cron.absent: {% endif %} - user: socore - - name: '/usr/sbin/so-yara-download >> /nsm/strelka/log/yara-download.log 2>&1' + - name: '/usr/sbin/so-yara-download >> /opt/so/log/yarasync/yara-download.log 2>&1' - identifier: strelka-yara-download - hour: '7' - minute: '1' -{% if ! GLOBALS.airgap %} +{% if not GLOBALS.airgap %} update_yara_rules: cmd.run: - name: /usr/sbin/so-yara-update From 018186ccbd1d63ec1e0785e13a9579f27751264c Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 28 Sep 2023 16:43:56 +0000 Subject: [PATCH 139/417] Upgrade packages and load integrations when packages change --- salt/elasticfleet/config.sls | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls index 902b5eb4c..82b975697 100644 --- a/salt/elasticfleet/config.sls +++ b/salt/elasticfleet/config.sls @@ -59,6 +59,14 @@ eastatedir: - group: 939 - makedirs: True +eapackageupgrade: + file.managed: + - name: /usr/sbin/so-elastic-fleet-package-upgrade + - source: salt://elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade + - user: 947 + - group: 939 + - template: jinja + {% if GLOBALS.role != "so-fleet" %} eaintegrationsdir: file.directory: @@ -88,6 +96,7 @@ ea-integrations-load: - onchanges: - file: eaintegration - file: eadynamicintegration + - file: eapackageupgrade {% endif %} {% else %} From 95d32cb07689a8792e6b2be213c38314797c8eec Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 28 Sep 2023 12:49:46 -0400 Subject: [PATCH 140/417] Fix manager cron logic --- salt/manager/init.sls | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/init.sls 
b/salt/manager/init.sls index 68d51c2af..e808325ef 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -73,7 +73,7 @@ yara_update_scripts: EXCLUDEDRULES: {{ STRELKAMERGED.rules.excluded }} so-repo-sync: - {% if MANAGERMERGED.reposync.enabled or not GLOBALS.airgap %} + {% if MANAGERMERGED.reposync.enabled %} cron.present: {% else %} cron.absent: @@ -112,7 +112,7 @@ strelkarepos: - makedirs: True strelka-yara-update: - {% if MANAGERMERGED.reposync.enabled or not GLOBALS.airgap %} + {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} cron.present: {% else %} cron.absent: @@ -124,7 +124,7 @@ strelka-yara-update: - minute: '1' strelka-yara-download: - {% if MANAGERMERGED.reposync.enabled or not GLOBALS.airgap %} + {% if MANAGERMERGED.reposync.enabled and not GLOBALS.airgap %} cron.present: {% else %} cron.absent: From ff359460508df28a1a5f022b63507cd076f94047 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 28 Sep 2023 13:06:21 -0400 Subject: [PATCH 141/417] Fix manager cron logic --- setup/so-functions | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 3707e3141..679142e2a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1963,12 +1963,10 @@ securityonion_repo() { fi if [[ $is_rpm ]]; then logCmd "dnf repolist all"; fi if [[ $waitforstate ]]; then - if [[ ! $is_airgap ]]; then - if [[ $is_rpm ]]; then + if [[ $is_rpm ]]; then # Build the repo locally so we can use it echo "Syncing Repos" repo_sync_local - fi fi fi } @@ -1978,7 +1976,7 @@ repo_sync_local() { if [[ $is_supported ]]; then # Sync the repo from the the SO repo locally. # Check for reposync - info "Backing up old repos" + info "Adding Repo Download Configuration" mkdir -p /nsm/repo mkdir -p /opt/so/conf/reposync/cache echo "https://repo.securityonion.net/file/so-repo/prod/2.4/oracle/9" > /opt/so/conf/reposync/mirror.txt @@ -2002,10 +2000,10 @@ repo_sync_local() { if [[ ! 
$is_airgap ]]; then curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install logCmd "dnf reposync --norepopath -g --delete -m -c /opt/so/conf/reposync/repodownload.conf --repoid=securityonionsync --download-metadata -p /nsm/repo/" + # After the download is complete run createrepo + create_repo fi - # After the download is complete run createrepo - create_repo else # Add the proper repos for unsupported stuff echo "Adding Repos" From 8c44481ee15a2776d32cff1b1e0e6e68619300f5 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 28 Sep 2023 17:57:31 +0000 Subject: [PATCH 142/417] Load templates after package changes --- .../tools/sbin_jinja/so-elastic-fleet-package-upgrade | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade index 2fb3f7798..a092e3ecb 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade @@ -15,3 +15,4 @@ elastic_fleet_package_install "{{ PACKAGE }}" "$VERSION" echo {%- endfor %} echo +/usr/sbin/so-elasticsearch-templates-load From 670cd190518ba9337d63af1dbff0d8052c674241 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 28 Sep 2023 18:04:07 +0000 Subject: [PATCH 143/417] Exclude package upgrade script --- salt/elasticfleet/config.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls index 82b975697..d2e357c91 100644 --- a/salt/elasticfleet/config.sls +++ b/salt/elasticfleet/config.sls @@ -37,6 +37,8 @@ elasticfleet_sbin_jinja: - group: 939 - file_mode: 755 - template: jinja + - exclude_pat: + - so-elastic-fleet-package-upgrade # exclude this because we need to watch it for changes eaconfdir: file.directory: From b8aad7f5e605b47aab7b704b75c7dc65c4d9f5b0 Mon Sep 17 00:00:00 2001 From: Mike 
Reeves Date: Thu, 28 Sep 2023 19:44:49 -0400 Subject: [PATCH 144/417] Update defaults.yaml --- salt/elasticfleet/defaults.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 6737df17d..a4862623d 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -13,6 +13,7 @@ elasticfleet: - broker - capture_loss - cluster + - conn-summary - console - ecat_arp_info - known_certs From ec3cc7a854137ab4f701c022aa895ecab864851f Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 29 Sep 2023 10:49:36 -0400 Subject: [PATCH 145/417] exclude all playbook logs --- salt/common/tools/sbin/so-log-check | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 865846fac..03b2e5c68 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -118,6 +118,8 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error: '0'" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|errors_index" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noerror" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding component template" # false positive (elastic security) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index template" # false positive (elastic security) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fs_errors" # false positive (suricata stats) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error-template" # false positive (elastic templates) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|deprecated" # false positive (playbook) @@ -141,7 +143,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|_ml" # Elastic ML errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|context canceled" # elastic agent during shutdown EXCLUDED_ERRORS="$EXCLUDED_ERRORS|exited with code 128" # soctopus errors during forced restart by highstate - 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip database update" # airgap can't update GeoIP DB + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|geoip databases update" # airgap can't update GeoIP DB EXCLUDED_ERRORS="$EXCLUDED_ERRORS|filenotfounderror" # bug in 2.4.10 filecheck salt state caused duplicate cronjobs EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error @@ -181,11 +183,13 @@ RESULT=0 # Check Security Onion container stdout/stderr logs CONTAINER_IDS=$(docker ps -q) -exclude_container so-kibana -exclude_container so-idstools +exclude_container so-kibana # kibana error logs are too verbose with large varieties of errors most of which are temporary +exclude_container so-idstools # ignore due to known issues and noisy logging +exclude_container so-playbook # ignore due to several playbook known issues for container_id in $CONTAINER_IDS; do - status "Checking container $container_id" + container_name=$(docker ps --format json | jq ". 
| select(.ID==\"$container_id\")|.Names") + status "Checking container $container_name" docker logs -n $RECENT_LOG_LINES $container_id > /tmp/log_check 2>&1 check_for_errors done @@ -195,10 +199,11 @@ find /opt/so/log/ /nsm -name \*.log > /tmp/log_check_files if [[ -f /var/log/cron ]]; then echo "/var/log/cron" >> /tmp/log_check_files fi -exclude_log "kibana.log" # kibana error logs are too verbose with large varieties of errors most of which are temporary -exclude_log "spool" # disregard zeek analyze logs -exclude_log "import" # disregard imported test data the contains error strings -exclude_log "update.log" # ignore playbook updates due to known issues +exclude_log "kibana.log" # kibana error logs are too verbose with large varieties of errors most of which are temporary +exclude_log "spool" # disregard zeek analyze logs as this is data specific +exclude_log "import" # disregard imported test data the contains error strings +exclude_log "update.log" # ignore playbook updates due to several known issues +exclude_log "playbook.log" # ignore due to several playbook known issues for log_file in $(cat /tmp/log_check_files); do status "Checking log file $log_file" From 9d3f6059eed0a07395b0e687b6f7c85b9394c842 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 29 Sep 2023 11:10:08 -0400 Subject: [PATCH 146/417] remove redis from eval --- salt/telegraf/defaults.yaml | 1 - salt/top.sls | 1 - 2 files changed, 2 deletions(-) diff --git a/salt/telegraf/defaults.yaml b/salt/telegraf/defaults.yaml index ab8679e57..0b7d532b1 100644 --- a/salt/telegraf/defaults.yaml +++ b/salt/telegraf/defaults.yaml @@ -15,7 +15,6 @@ telegraf: - influxdbsize.sh - oldpcap.sh - raid.sh - - redis.sh - sostatus.sh - stenoloss.sh - suriloss.sh diff --git a/salt/top.sls b/salt/top.sls index 6db19b361..4f84e17ac 100644 --- a/salt/top.sls +++ b/salt/top.sls @@ -79,7 +79,6 @@ base: - utility - soctopus - playbook - - redis - elasticfleet '*_manager and G@saltversion:{{saltversion}}': From 
d546d520690abd4f9ea549e60de483907b0c1eda Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 29 Sep 2023 14:08:44 -0400 Subject: [PATCH 147/417] exclude logstash --- salt/common/tools/sbin/so-log-check | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 03b2e5c68..b19026cad 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -91,7 +91,10 @@ EXCLUDED_ERRORS="__LOG_CHECK_PLACEHOLDER_EXCLUSION__" if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|database is locked" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|econnreset" # server not yet ready - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unreachable" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unreachable" # server not yet ready (logstash waiting on elastic) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|shutdown process" # server not yet ready (logstash waiting on elastic) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|contain valid certificates" # server not yet ready (logstash waiting on elastic) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failedaction" # server not yet ready (logstash waiting on elastic) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|no route to host" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|not running" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|unavailable" # server not yet ready @@ -148,6 +151,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|salt-minion-check" # bug in early 2.4 place Jinja script in non-jinja salt dir causing cron output errors EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From ad01be66ea34cc039fd214c1f8f749942e6545aa Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 29 Sep 2023 14:09:04 -0400 Subject: [PATCH 148/417] remove checkmine engine. add x509.get_pem_entries to managers mine_functions. simplify mine update during soup --- salt/manager/tools/sbin/soup | 2 +- salt/salt/etc/minion.d/mine_functions.conf.jinja | 4 ++++ salt/salt/master.sls | 9 +++------ 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 960c50f31..333be836b 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -580,7 +580,7 @@ update_centos_repo() { update_salt_mine() { echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host." set +e - salt \* cmd.run cmd='MAININT=$(salt-call pillar.get host:mainint --out=newline_values_only) && salt-call mine.send name=network.ip_addrs interface="$MAININT"' + salt \* mine.update set -e } diff --git a/salt/salt/etc/minion.d/mine_functions.conf.jinja b/salt/salt/etc/minion.d/mine_functions.conf.jinja index 378d2c435..2ae345cdf 100644 --- a/salt/salt/etc/minion.d/mine_functions.conf.jinja +++ b/salt/salt/etc/minion.d/mine_functions.conf.jinja @@ -2,3 +2,7 @@ mine_interval: 35 mine_functions: network.ip_addrs: - interface: {{ GLOBALS.main_interface }} +{% if GLOBALS.is_manager -%} + x509.get_pem_entries: + - glob_path: '/etc/pki/ca.crt' +{% endif -%} diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 8b2b6c7d0..b10a4df0f 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -18,17 +18,14 @@ salt_master_service: - enable: True checkmine_engine: - file.managed: + file.absent: - name: /etc/salt/engines/checkmine.py - - source: salt://salt/engines/checkmine.py - - makedirs: True - watch_in: - service: salt_minion_service engines_config: 
- file.managed: + file.absent: - name: /etc/salt/minion.d/engines.conf - - source: salt://salt/files/engines.conf - watch_in: - service: salt_minion_service @@ -38,4 +35,4 @@ engines_config: test.fail_without_changes: - name: {{sls}}_state_not_allowed -{% endif %} \ No newline at end of file +{% endif %} From e8b67da08bdf5a8239a33d7e6e99450d2d4b49fb Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 29 Sep 2023 14:20:20 -0400 Subject: [PATCH 149/417] exclude oom error from cmd line --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index b19026cad..63a33c4ee 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -121,6 +121,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error: '0'" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|errors_index" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noerror" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|outofmemoryerror" # false positive (elastic command line) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding component template" # false positive (elastic security) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|adding index template" # false positive (elastic security) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|fs_errors" # false positive (suricata stats) From 8690304dffce6fc1ef2a923edf8ec2b80d90079d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 29 Sep 2023 16:17:19 -0400 Subject: [PATCH 150/417] change how mine_functions.conf is managed during setup --- salt/salt/etc/minion.d/mine_functions.conf.jinja | 4 ++-- salt/salt/mine_functions.sls | 5 +++++ salt/salt/minion.sls | 9 +-------- setup/so-functions | 3 +-- 4 files changed, 9 insertions(+), 12 deletions(-) create mode 100644 salt/salt/mine_functions.sls diff --git a/salt/salt/etc/minion.d/mine_functions.conf.jinja b/salt/salt/etc/minion.d/mine_functions.conf.jinja index 
2ae345cdf..e3c62e75c 100644 --- a/salt/salt/etc/minion.d/mine_functions.conf.jinja +++ b/salt/salt/etc/minion.d/mine_functions.conf.jinja @@ -1,8 +1,8 @@ mine_interval: 35 mine_functions: network.ip_addrs: - - interface: {{ GLOBALS.main_interface }} -{% if GLOBALS.is_manager -%} + - interface: {{ pillar.host.mainint }} +{% if grains.role in ['so-eval','so-import','so-manager','so-managersearch','so-standalone'] -%} x509.get_pem_entries: - glob_path: '/etc/pki/ca.crt' {% endif -%} diff --git a/salt/salt/mine_functions.sls b/salt/salt/mine_functions.sls new file mode 100644 index 000000000..27a905847 --- /dev/null +++ b/salt/salt/mine_functions.sls @@ -0,0 +1,5 @@ +mine_functions: + file.managed: + - name: /etc/salt/minion.d/mine_functions.conf + - source: salt://salt/etc/minion.d/mine_functions.conf.jinja + - template: jinja diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 43f7539f9..865bd367f 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -12,6 +12,7 @@ include: - salt - systemd.reload - repo.client + - salt.mine_functions {% if INSTALLEDSALTVERSION|string != SALTVERSION|string %} @@ -78,14 +79,6 @@ salt_minion_service_unit_file: {% endif %} -mine_functions: - file.managed: - - name: /etc/salt/minion.d/mine_functions.conf - - source: salt://salt/etc/minion.d/mine_functions.conf.jinja - - template: jinja - - defaults: - GLOBALS: {{ GLOBALS }} - # this has to be outside the if statement above since there are _in calls to this state salt_minion_service: service.running: diff --git a/setup/so-functions b/setup/so-functions index 679142e2a..eab7a4add 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -649,8 +649,7 @@ configure_minion() { "log_level_logfile: info"\ "log_file: /opt/so/log/salt/minion" >> "$minion_config" - cp -f ../salt/salt/etc/minion.d/mine_functions.conf.jinja /etc/salt/minion.d/mine_functions.conf - sed -i "s/{{ GLOBALS.main_interface }}/$MNIC/" /etc/salt/minion.d/mine_functions.conf + logCmd "salt-call 
state.apply salt.mine_functions -l info" { logCmd "systemctl enable salt-minion"; From 827ed7b273cf2a9180fa94e7cd398a2e178dbfcb Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 29 Sep 2023 17:08:42 -0400 Subject: [PATCH 151/417] run salt.mine_function state locally and provide pillar info to it --- setup/so-functions | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index eab7a4add..b55ae0def 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -649,7 +649,8 @@ configure_minion() { "log_level_logfile: info"\ "log_file: /opt/so/log/salt/minion" >> "$minion_config" - logCmd "salt-call state.apply salt.mine_functions -l info" + info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='{"host": {"mainint": "$MNIC"}}'" + salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="{'host': {'mainint': $MNIC}}" { logCmd "systemctl enable salt-minion"; From 39ea1d317df32c17e96f3566fea64ddfbd33b297 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 29 Sep 2023 17:12:14 -0400 Subject: [PATCH 152/417] add comment --- salt/salt/mine_functions.sls | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/salt/salt/mine_functions.sls b/salt/salt/mine_functions.sls index 27a905847..49a47e524 100644 --- a/salt/salt/mine_functions.sls +++ b/salt/salt/mine_functions.sls @@ -1,3 +1,11 @@ +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. 
+ +# this state was seperated from salt.minion state since it is called during setup +# GLOBALS are imported in the salt.minion state and that is not available at that point in setup +# this state is included in the salt.minion state mine_functions: file.managed: - name: /etc/salt/minion.d/mine_functions.conf From ea085c5ff6aafb1e06b5851e8731c934c6fc3ccf Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 29 Sep 2023 21:38:13 -0400 Subject: [PATCH 153/417] more known errors --- salt/common/tools/sbin/so-log-check | 3 +++ 1 file changed, 3 insertions(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 63a33c4ee..ba5285bf3 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -105,6 +105,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing shards" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to send metrics" # server not yet ready EXCLUDED_ERRORS="$EXCLUDED_ERRORS|broken pipe" # server not yet ready + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status: 502" # server not yet ready (nginx waiting on upstream) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout exceeded" # server not yet ready (telegraf waiting on elasticsearch) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|influxsize kbytes" # server not yet ready (telegraf waiting on influx) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|expected field at" # server not yet ready (telegraf waiting on health data) @@ -153,6 +154,8 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf does not" # known issue with reposync on pre-2.4.20 + 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From 8c7933cd60feabd414036da72cf3c2282212b99d Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sat, 30 Sep 2023 18:11:29 -0400 Subject: [PATCH 154/417] fix exclusion --- salt/common/tools/sbin/so-log-check | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index ba5285bf3..dac1121bc 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -154,7 +154,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|generating elastalert config" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|activerecord" # playbook expected error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf does not" # known issue with reposync on pre-2.4.20 + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" From cd8a74290b6f1259d21e294282ac83dc9aeddaa5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 10:36:17 -0400 Subject: [PATCH 155/417] hold openssl version --- salt/common/init.sls | 1 - salt/common/packages.sls | 11 ++++++++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/salt/common/init.sls b/salt/common/init.sls index f50f0c61b..37ea4239d 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -91,7 +91,6 @@ vimconfig: alwaysupdated: pkg.latest: - pkgs: - - 
openssl - openssh-server - bash - skip_suggestions: True diff --git a/salt/common/packages.sls b/salt/common/packages.sls index 8b54bdbf5..f5707a377 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -46,6 +46,12 @@ python-rich: {% endif %} {% if GLOBALS.os_family == 'RedHat' %} + +holdversion_openssl: + pkg.held: + - name: - openssl + - version: 1:3.0.7-16.0.1.el9_2 + commonpkgs: pkg.installed: - skip_suggestions: True @@ -65,7 +71,7 @@ commonpkgs: - mariadb-devel - net-tools - nmap-ncat - - openssl + - openssl: 1:3.0.7-16.0.1.el9_2 - procps-ng - python3-dnf-plugin-versionlock - python3-docker @@ -79,4 +85,7 @@ commonpkgs: - unzip - wget - yum-utils + + + {% endif %} From 70a36bafa54b92e258f4e5a2942006c04dcd7b1e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 10:38:54 -0400 Subject: [PATCH 156/417] remove - --- salt/common/packages.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index f5707a377..ae723fd94 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -49,7 +49,7 @@ python-rich: holdversion_openssl: pkg.held: - - name: - openssl + - name: openssl - version: 1:3.0.7-16.0.1.el9_2 commonpkgs: From dfe399291f9398435fd0520955bf19826400bb04 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 10:54:41 -0400 Subject: [PATCH 157/417] hold openssl-libs --- salt/common/packages.sls | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index ae723fd94..f7c8fd5dc 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -52,6 +52,11 @@ holdversion_openssl: - name: openssl - version: 1:3.0.7-16.0.1.el9_2 +holdversion_openssl-libs: + pkg.held: + - name: openssl-libs + - version: 1:3.0.7-16.0.1.el9_2 + commonpkgs: pkg.installed: - skip_suggestions: True From c1ab8952eb727c0cf0cea085c6b75aa468109b0e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 
10:59:51 -0400 Subject: [PATCH 158/417] hold openssl-devel --- salt/common/packages.sls | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index f7c8fd5dc..a4a32f15f 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -57,6 +57,11 @@ holdversion_openssl-libs: - name: openssl-libs - version: 1:3.0.7-16.0.1.el9_2 +holdversion_openssl-devel: + pkg.held: + - name: openssl-devel + - version: 1:3.0.7-16.0.1.el9_2 + commonpkgs: pkg.installed: - skip_suggestions: True From f85dd910a302bad9515390d99d7929fe8106fe3c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 11:13:08 -0400 Subject: [PATCH 159/417] hold openssl from update during setup --- salt/common/packages.sls | 2 ++ setup/so-functions | 3 ++- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index a4a32f15f..0bf8616be 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -47,6 +47,8 @@ python-rich: {% if GLOBALS.os_family == 'RedHat' %} +# holding these since openssl-devel-1:3.0.7-16.0.1.el9_2 seems to be a requirement for mariadb-devel-3:10.5.16-2.el9_0 +# https://github.com/Security-Onion-Solutions/securityonion/discussions/11443 holdversion_openssl: pkg.held: - name: openssl diff --git a/setup/so-functions b/setup/so-functions index 679142e2a..26e1b2dab 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2443,7 +2443,8 @@ update_sudoers_for_testing() { update_packages() { if [[ $is_oracle ]]; then logCmd "dnf repolist" - logCmd "dnf -y update --allowerasing --exclude=salt*,docker*,containerd*" + # holding openssl https://github.com/Security-Onion-Solutions/securityonion/discussions/11443 + logCmd "dnf -y update --allowerasing --exclude=salt*,docker*,containerd*,openssl*" RMREPOFILES=("oracle-linux-ol9.repo" "uek-ol9.repo" "virt-ol9.repo") info "Removing repo files added by oracle-repos package update" for FILE in ${RMREPOFILES[@]}; do 
From 0f08d5d640a2e0e0fa6767ded9a7ec9d934c15ae Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 11:43:03 -0400 Subject: [PATCH 160/417] install openssl version 1:3.0.7-16.0.1.el9_2 --- setup/so-functions | 1 + 1 file changed, 1 insertion(+) diff --git a/setup/so-functions b/setup/so-functions index 26e1b2dab..243e89c99 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2444,6 +2444,7 @@ update_packages() { if [[ $is_oracle ]]; then logCmd "dnf repolist" # holding openssl https://github.com/Security-Onion-Solutions/securityonion/discussions/11443 + logCmd "dnf -y install openssl-1:3.0.7-16.0.1.el9_2 openssl-libs-1:3.0.7-16.0.1.el9_2 openssl-devel-1:3.0.7-16.0.1.el9_2" logCmd "dnf -y update --allowerasing --exclude=salt*,docker*,containerd*,openssl*" RMREPOFILES=("oracle-linux-ol9.repo" "uek-ol9.repo" "virt-ol9.repo") info "Removing repo files added by oracle-repos package update" From 3a5c6ee43aac37d2f385bd93091f89dd3dd84bc1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 12:09:13 -0400 Subject: [PATCH 161/417] install version lock before we try to hold pkgs --- salt/common/packages.sls | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index 0bf8616be..827cc6bf0 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -47,6 +47,11 @@ python-rich: {% if GLOBALS.os_family == 'RedHat' %} +# install versionlock first so we can hold packages in the next states +install_versionlock: + pkg.installed: + - name: python3-dnf-plugin-versionlock + # holding these since openssl-devel-1:3.0.7-16.0.1.el9_2 seems to be a requirement for mariadb-devel-3:10.5.16-2.el9_0 # https://github.com/Security-Onion-Solutions/securityonion/discussions/11443 holdversion_openssl: @@ -85,7 +90,6 @@ commonpkgs: - nmap-ncat - openssl: 1:3.0.7-16.0.1.el9_2 - procps-ng - - python3-dnf-plugin-versionlock - python3-docker - python3-m2crypto - python3-packaging From 
6547afe6c07cc064587a44c3ca13b723c92d7375 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 13:35:00 -0400 Subject: [PATCH 162/417] dont hold openssl-devel --- salt/common/packages.sls | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index 827cc6bf0..185bf536e 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -64,11 +64,6 @@ holdversion_openssl-libs: - name: openssl-libs - version: 1:3.0.7-16.0.1.el9_2 -holdversion_openssl-devel: - pkg.held: - - name: openssl-devel - - version: 1:3.0.7-16.0.1.el9_2 - commonpkgs: pkg.installed: - skip_suggestions: True @@ -85,10 +80,10 @@ commonpkgs: - httpd-tools - jq - lvm2 + - openssl: 1:3.0.7-16.0.1.el9_2 - mariadb-devel - net-tools - nmap-ncat - - openssl: 1:3.0.7-16.0.1.el9_2 - procps-ng - python3-docker - python3-m2crypto @@ -102,6 +97,4 @@ commonpkgs: - wget - yum-utils - - {% endif %} From 6b90961e87221dcb3e16a5702ff618b237274a28 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 14:26:28 -0400 Subject: [PATCH 163/417] openssl-libs --- salt/common/packages.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index 185bf536e..adef3828b 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -81,6 +81,7 @@ commonpkgs: - jq - lvm2 - openssl: 1:3.0.7-16.0.1.el9_2 + - openssl-libs: 1:3.0.7-16.0.1.el9_2 - mariadb-devel - net-tools - nmap-ncat From d7a14d9e00ab8b098a32c4487a09b22332980da2 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 15:08:22 -0400 Subject: [PATCH 164/417] update holds --- salt/common/packages.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index adef3828b..b002c62e9 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -67,6 +67,7 @@ holdversion_openssl-libs: commonpkgs: pkg.installed: - skip_suggestions: True + - update_holds: 
True - pkgs: - curl - device-mapper-persistent-data From 57e76232eca7076451d7075ad400d8156daae718 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 15:48:53 -0400 Subject: [PATCH 165/417] openssl pkgs in own state --- salt/common/packages.sls | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index b002c62e9..ca0326839 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -64,10 +64,18 @@ holdversion_openssl-libs: - name: openssl-libs - version: 1:3.0.7-16.0.1.el9_2 -commonpkgs: +openssl_pkgs: pkg.installed: - skip_suggestions: True - update_holds: True + - pkgs: + - openssl: 1:3.0.7-16.0.1.el9_2 + - openssl-libs: 1:3.0.7-16.0.1.el9_2 + - openssl-devel: 1:3.0.7-16.0.1.el9_2 + +commonpkgs: + pkg.installed: + - skip_suggestions: True - pkgs: - curl - device-mapper-persistent-data @@ -81,8 +89,6 @@ commonpkgs: - httpd-tools - jq - lvm2 - - openssl: 1:3.0.7-16.0.1.el9_2 - - openssl-libs: 1:3.0.7-16.0.1.el9_2 - mariadb-devel - net-tools - nmap-ncat From 8995752c2722116e2cf328d067a24371e68bcd33 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 2 Oct 2023 16:17:26 -0400 Subject: [PATCH 166/417] let openssl-devel be installed with mariadb --- salt/common/packages.sls | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index ca0326839..b4e97a81d 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -71,7 +71,6 @@ openssl_pkgs: - pkgs: - openssl: 1:3.0.7-16.0.1.el9_2 - openssl-libs: 1:3.0.7-16.0.1.el9_2 - - openssl-devel: 1:3.0.7-16.0.1.el9_2 commonpkgs: pkg.installed: From c699c2fe2ab9cb2d94e6460d5a5ea69cc60d38fa Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 3 Oct 2023 09:43:29 -0400 Subject: [PATCH 167/417] exclude known issues --- salt/common/tools/sbin/so-log-check | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check 
b/salt/common/tools/sbin/so-log-check index dac1121bc..c2d16fd86 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -118,7 +118,7 @@ fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|elastalert_status_error" # false positive - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|elastalert_error.json" # false positive + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|elastalert_error" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error: '0'" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|errors_index" # false positive EXCLUDED_ERRORS="$EXCLUDED_ERRORS|noerror" # false positive @@ -156,6 +156,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|monitoring.metrics" # known issue with elastic agent casting the field incorrectly if an integer value shows up before a float EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." 
# known ingest type collisions issue with earlier versions of SO EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From 66be04e78a8c1a6717134024c89773af2b9d1b7f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 3 Oct 2023 09:53:40 -0400 Subject: [PATCH 168/417] remove mariadb --- salt/common/init.sls | 1 + salt/common/packages.sls | 29 ++++------------------------- salt/common/tools/sbin/so-common | 2 +- setup/so-functions | 3 +-- 4 files changed, 7 insertions(+), 28 deletions(-) diff --git a/salt/common/init.sls b/salt/common/init.sls index 37ea4239d..f50f0c61b 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -91,6 +91,7 @@ vimconfig: alwaysupdated: pkg.latest: - pkgs: + - openssl - openssh-server - bash - skip_suggestions: True diff --git a/salt/common/packages.sls b/salt/common/packages.sls index b4e97a81d..c5d2729fd 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -47,35 +47,15 @@ python-rich: {% if GLOBALS.os_family == 'RedHat' %} -# install versionlock first so we can hold packages in the next states -install_versionlock: - pkg.installed: - - name: python3-dnf-plugin-versionlock - -# holding these since openssl-devel-1:3.0.7-16.0.1.el9_2 seems to be a requirement for mariadb-devel-3:10.5.16-2.el9_0 -# https://github.com/Security-Onion-Solutions/securityonion/discussions/11443 -holdversion_openssl: - pkg.held: - - name: openssl - - version: 1:3.0.7-16.0.1.el9_2 - -holdversion_openssl-libs: - pkg.held: - - name: openssl-libs - - version: 1:3.0.7-16.0.1.el9_2 - -openssl_pkgs: - pkg.installed: - - skip_suggestions: True - - update_holds: True - - pkgs: - - openssl: 1:3.0.7-16.0.1.el9_2 - - openssl-libs: 1:3.0.7-16.0.1.el9_2 +remove_mariadb: + pkg.removed: + - name: mariadb-devel commonpkgs: pkg.installed: - skip_suggestions: True - pkgs: + - python3-dnf-plugin-versionlock - curl - device-mapper-persistent-data - fuse @@ 
-88,7 +68,6 @@ commonpkgs: - httpd-tools - jq - lvm2 - - mariadb-devel - net-tools - nmap-ncat - procps-ng diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 0dfb19bbe..f754b34ef 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -240,7 +240,7 @@ gpg_rpm_import() { else local RPMKEYSLOC="$UPDATE_DIR/salt/repo/client/files/$OS/keys" fi - RPMKEYS=('RPM-GPG-KEY-oracle' 'RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub' 'MariaDB-Server-GPG-KEY') + RPMKEYS=('RPM-GPG-KEY-oracle' 'RPM-GPG-KEY-EPEL-9' 'SALT-PROJECT-GPG-PUBKEY-2023.pub' 'docker.pub' 'securityonion.pub') for RPMKEY in "${RPMKEYS[@]}"; do rpm --import $RPMKEYSLOC/$RPMKEY echo "Imported $RPMKEY" diff --git a/setup/so-functions b/setup/so-functions index 243e89c99..84d6d80f9 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2444,8 +2444,7 @@ update_packages() { if [[ $is_oracle ]]; then logCmd "dnf repolist" # holding openssl https://github.com/Security-Onion-Solutions/securityonion/discussions/11443 - logCmd "dnf -y install openssl-1:3.0.7-16.0.1.el9_2 openssl-libs-1:3.0.7-16.0.1.el9_2 openssl-devel-1:3.0.7-16.0.1.el9_2" - logCmd "dnf -y update --allowerasing --exclude=salt*,docker*,containerd*,openssl*" + logCmd "dnf -y update --allowerasing --exclude=salt*,docker*,containerd*" RMREPOFILES=("oracle-linux-ol9.repo" "uek-ol9.repo" "virt-ol9.repo") info "Removing repo files added by oracle-repos package update" for FILE in ${RMREPOFILES[@]}; do From 2434ce14d3fe1ed8773e085a6696b9d01026d1c5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 3 Oct 2023 10:01:07 -0400 Subject: [PATCH 169/417] remove removing mariadb-devel --- salt/common/packages.sls | 4 ---- setup/so-functions | 1 - 2 files changed, 5 deletions(-) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index c5d2729fd..521f2201c 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -47,10 +47,6 @@ 
python-rich: {% if GLOBALS.os_family == 'RedHat' %} -remove_mariadb: - pkg.removed: - - name: mariadb-devel - commonpkgs: pkg.installed: - skip_suggestions: True diff --git a/setup/so-functions b/setup/so-functions index 84d6d80f9..679142e2a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2443,7 +2443,6 @@ update_sudoers_for_testing() { update_packages() { if [[ $is_oracle ]]; then logCmd "dnf repolist" - # holding openssl https://github.com/Security-Onion-Solutions/securityonion/discussions/11443 logCmd "dnf -y update --allowerasing --exclude=salt*,docker*,containerd*" RMREPOFILES=("oracle-linux-ol9.repo" "uek-ol9.repo" "virt-ol9.repo") info "Removing repo files added by oracle-repos package update" From f3ba28062b48e6bfd9adb55c649057eb6987ca14 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 3 Oct 2023 10:05:56 -0400 Subject: [PATCH 170/417] Remove MySQL --- salt/mysql/config.sls | 2 +- setup/so-functions | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/mysql/config.sls b/salt/mysql/config.sls index 5f9010011..274f25d76 100644 --- a/salt/mysql/config.sls +++ b/salt/mysql/config.sls @@ -9,7 +9,7 @@ # MySQL Setup mysqlpkgs: - pkg.installed: + pkg.removed: - skip_suggestions: False - pkgs: {% if grains['os_family'] != 'RedHat' %} diff --git a/setup/so-functions b/setup/so-functions index 679142e2a..aad627a8d 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2088,7 +2088,7 @@ saltify() { if [[ $waitforstate ]]; then retry 150 20 "apt-get -y install salt-common=$SALTVERSION salt-minion=$SALTVERSION salt-master=$SALTVERSION" || fail_setup retry 150 20 "apt-mark hold salt-minion salt-common salt-master" || fail_setup - retry 150 20 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-mysqldb python3-packaging python3-influxdb python3-lxml" || exit 1 + retry 150 20 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-packaging python3-influxdb python3-lxml" || exit 1 else 
retry 150 20 "apt-get -y install salt-common=$SALTVERSION salt-minion=$SALTVERSION" || fail_setup retry 150 20 "apt-mark hold salt-minion salt-common" || fail_setup From d78b55873d369e0fa759d8c484ad2e51289ee286 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 3 Oct 2023 10:15:28 -0400 Subject: [PATCH 171/417] remove mariadb-devel --- salt/common/packages.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/common/packages.sls b/salt/common/packages.sls index 521f2201c..c5d2729fd 100644 --- a/salt/common/packages.sls +++ b/salt/common/packages.sls @@ -47,6 +47,10 @@ python-rich: {% if GLOBALS.os_family == 'RedHat' %} +remove_mariadb: + pkg.removed: + - name: mariadb-devel + commonpkgs: pkg.installed: - skip_suggestions: True From d79e27774c06e77787e8cb171990444594b37abb Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 5 Oct 2023 11:27:48 -0400 Subject: [PATCH 172/417] 2.4.20 --- DOWNLOAD_AND_VERIFY_ISO.md | 22 ++++++++++----------- sigs/securityonion-2.4.20-20231006.iso.sig | Bin 0 -> 566 bytes 2 files changed, 11 insertions(+), 11 deletions(-) create mode 100644 sigs/securityonion-2.4.20-20231006.iso.sig diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index 1e6299a8e..f78ed8045 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,18 +1,18 @@ -### 2.4.10-20230821 ISO image released on 2023/08/21 +### 2.4.20-20231006 ISO image released on 2023/08/21 ### Download and Verify -2.4.10-20230821 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.4.10-20230821.iso +2.4.20-20231006 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso -MD5: 353EB36F807DC947F08F79B3DCFA420E -SHA1: B25E3BEDB81BBEF319DC710267E6D78422F39C56 -SHA256: 3D369E92FEB65D14E1A981E99FA223DA52C92057A037C243AD6332B6B9A6D9BC +MD5: 269F00308C53976BF0EAE788D1DB29DB +SHA1: 3F7C2324AE1271112F3B752BA4724AF36688FC27 +SHA256: 
542B8B3F4F75AD24DC78007F8FE0857E00DC4CC9F4870154DCB8D5D0C4144B65 Signature for ISO image: -https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.10-20230821.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS @@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2. Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.10-20230821.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.4.10-20230821.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.4.10-20230821.iso.sig securityonion-2.4.10-20230821.iso +gpg --verify securityonion-2.4.20-20231006.iso.sig securityonion-2.4.20-20231006.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Mon 21 Aug 2023 09:47:50 AM EDT using RSA key ID FE507013 +gpg: Signature made Tue 03 Oct 2023 11:40:51 AM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. 
diff --git a/sigs/securityonion-2.4.20-20231006.iso.sig b/sigs/securityonion-2.4.20-20231006.iso.sig new file mode 100644 index 0000000000000000000000000000000000000000..b253c67343b7b2cf16a7e755c9994273a0ddcbf3 GIT binary patch literal 566 zcmV-60?GY}0y6{v0SEvc79j-41gSkXz6^6dp_W8^5Ma0dP;e6k0%aUF0{{vM5PT3| zxBgIY6PPIv|8AKBz*y#69E5wO6vi|oL@`l#Tb0V-g6qpodgIz_Qz?#fV&dsL)bw7; zEQ5dR`7WH2!A2&_oP21_JVTNA-V)o_J5B1nXhuKq`dW;jVYtmpO|bDSl2_jc>+i~z z!YaS~vE>O;Knj%{_=6*d1>;fqP7xtOq7dlE*J3+rJ@LOtG8j8$gDzLp(Yp+n>O*9Y zZ;w4lR4oDEQ5tlI#JsHlxdTPdpKI;7=?!Mjr=4})v59Qq@d3juf)K@ROkoR{Vtq8j zOfw_a81qDfNkCEs`A^plu`Gznwc-l3IdkH{5K}tU%EmV33V?2_i~4tT02)RIxGEU&aRRnk}JYjQpnhK>`SjSi# ze2u@u?YU>5_R;{3*6BAG1}iQSBOdN#r48V3bvTv`XYcuiUoJom;3*m4_}RLXHbgV~ zRnf9Ycgu(Tuxhq02}9f7VB85+A9}Q#K&Y5;+wQ~#<o@5C8xG literal 0 HcmV?d00001 From c25aed9a2b285631764a5ce2dfd93f01022d81c1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 5 Oct 2023 11:37:49 -0400 Subject: [PATCH 173/417] Update DOWNLOAD_AND_VERIFY_ISO.md --- DOWNLOAD_AND_VERIFY_ISO.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index f78ed8045..dabfd285c 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,4 +1,4 @@ -### 2.4.20-20231006 ISO image released on 2023/08/21 +### 2.4.20-20231006 ISO image released on 2023/10/06 From 4dc24b22c79042e8f6959f5e59e21b8fb7249410 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 10 Oct 2023 10:51:59 -0400 Subject: [PATCH 174/417] accept icmp on input chain --- salt/firewall/iptables.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/firewall/iptables.jinja b/salt/firewall/iptables.jinja index c15a54e46..074663e15 100644 --- a/salt/firewall/iptables.jinja +++ b/salt/firewall/iptables.jinja @@ -89,7 +89,6 @@ COMMIT -A INPUT -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT -A INPUT -i lo -j ACCEPT -A INPUT -m conntrack --ctstate INVALID -j DROP --A INPUT -j REJECT --reject-with 
icmp-host-prohibited -A INPUT -p icmp -j ACCEPT -A INPUT -j LOGGING -A FORWARD -j DOCKER-USER @@ -103,6 +102,7 @@ COMMIT -A FORWARD -m conntrack --ctstate INVALID -j DROP -A FORWARD -j REJECT --reject-with icmp-host-prohibited -A OUTPUT -o lo -j ACCEPT +# block icmp timestamp reply -A OUTPUT -p icmp -m icmp --icmp-type 14 -j DROP {%- for rule in D2 %} From 49ebbf3232fe08091f796b1e7b1100fa9aed7d56 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 10 Oct 2023 11:05:39 -0400 Subject: [PATCH 175/417] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index a3ab5389f..8ea99f559 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.20 +2.4.30 From a283e7ea0bea9928ff27ba42f022a5d07934520e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 10 Oct 2023 13:00:54 -0400 Subject: [PATCH 176/417] remove checkmine salt engine --- salt/salt/engines/checkmine.py | 28 ---------------------------- salt/salt/files/engines.conf | 6 ------ 2 files changed, 34 deletions(-) delete mode 100644 salt/salt/engines/checkmine.py delete mode 100644 salt/salt/files/engines.conf diff --git a/salt/salt/engines/checkmine.py b/salt/salt/engines/checkmine.py deleted file mode 100644 index 5cc0a5ad3..000000000 --- a/salt/salt/engines/checkmine.py +++ /dev/null @@ -1,28 +0,0 @@ -# -*- coding: utf-8 -*- - -import logging -from time import sleep -from os import remove - -log = logging.getLogger(__name__) - -def start(interval=30): - log.info("checkmine engine started") - minionid = __grains__['id'] - while True: - try: - ca_crt = __salt__['saltutil.runner']('mine.get', tgt=minionid, fun='x509.get_pem_entries')[minionid]['/etc/pki/ca.crt'] - log.info('Successfully queried Salt mine for the CA.') - except: - log.error('Could not pull CA from the Salt mine.') - log.info('Removing /var/cache/salt/master/minions/%s/mine.p to force Salt mine to be repopulated.' 
% minionid) - try: - remove('/var/cache/salt/master/minions/%s/mine.p' % minionid) - log.info('Removed /var/cache/salt/master/minions/%s/mine.p' % minionid) - except FileNotFoundError: - log.error('/var/cache/salt/master/minions/%s/mine.p does not exist' % minionid) - - __salt__['mine.send'](name='x509.get_pem_entries', glob_path='/etc/pki/ca.crt') - log.warning('Salt mine repopulated with /etc/pki/ca.crt') - - sleep(interval) \ No newline at end of file diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf deleted file mode 100644 index c9e20adf3..000000000 --- a/salt/salt/files/engines.conf +++ /dev/null @@ -1,6 +0,0 @@ -engines_dirs: - - /etc/salt/engines - -engines: - - checkmine: - interval: 30 \ No newline at end of file From 89467adf9c3ba493c397836a942b9f75b9eb183e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 10 Oct 2023 13:05:43 -0400 Subject: [PATCH 177/417] batch the salt mine update --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 333be836b..e4b388e22 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -580,7 +580,7 @@ update_centos_repo() { update_salt_mine() { echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host." 
set +e - salt \* mine.update + salt \* mine.update -b 50 set -e } From 4193130ed05fc6cd6e34e1432161737c23996a74 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 10 Oct 2023 13:07:12 -0400 Subject: [PATCH 178/417] reduce salt mine interval to 25 minutes --- salt/salt/etc/minion.d/mine_functions.conf.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/etc/minion.d/mine_functions.conf.jinja b/salt/salt/etc/minion.d/mine_functions.conf.jinja index e3c62e75c..3851238fd 100644 --- a/salt/salt/etc/minion.d/mine_functions.conf.jinja +++ b/salt/salt/etc/minion.d/mine_functions.conf.jinja @@ -1,4 +1,4 @@ -mine_interval: 35 +mine_interval: 25 mine_functions: network.ip_addrs: - interface: {{ pillar.host.mainint }} From 2094b4f6889d74fe4fc6b597511b38c8bc838fa7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 11 Oct 2023 09:04:36 -0400 Subject: [PATCH 179/417] upgrade to salt 3006.3 --- salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 126039802..40b6f5268 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -2,4 +2,4 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: master: - version: 3006.1 + version: 3006.3 diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index 7e1540d17..71fd18f96 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -2,6 +2,6 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: minion: - version: 3006.1 + version: 3006.3 check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. 
any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default service_start_delay: 30 # in seconds. From d357864d69bc9abf1800558c28160321df8ae191 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 11 Oct 2023 15:32:11 -0400 Subject: [PATCH 180/417] fix upgrade_salt function for oel --- salt/manager/tools/sbin/soup | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index e4b388e22..acde3f74a 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -628,8 +628,8 @@ upgrade_salt() { SALTUPGRADED=True echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "" - # If CentOS - if [[ $OS == 'centos' ]]; then + # If Oracle Linux + if [[ $OS == 'oel' ]]; then echo "Removing yum versionlock for Salt." echo "" yum versionlock delete "salt-*" From 1043315e6b35cf8d07d924ab8ce6d296b5752e62 Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Thu, 12 Oct 2023 09:22:26 -0400 Subject: [PATCH 181/417] Manage Elastic Defend Integration manually --- .../elastic-defend-endpoints.json | 0 .../tools/sbin/so-elastic-fleet-common | 18 +++++++++++++ ...ic-fleet-integration-policy-elastic-defend | 27 +++++++++++++++++++ .../so-elastic-fleet-integration-policy-load | 4 +++ 4 files changed, 49 insertions(+) rename salt/elasticfleet/files/integrations/{endpoints-initial => elastic-defend}/elastic-defend-endpoints.json (100%) mode change 100755 => 100644 salt/elasticfleet/tools/sbin/so-elastic-fleet-common create mode 100755 salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-elastic-defend mode change 100755 => 100644 salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load diff --git a/salt/elasticfleet/files/integrations/endpoints-initial/elastic-defend-endpoints.json b/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json 
similarity index 100% rename from salt/elasticfleet/files/integrations/endpoints-initial/elastic-defend-endpoints.json rename to salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json diff --git a/salt/elasticfleet/tools/sbin/so-elastic-fleet-common b/salt/elasticfleet/tools/sbin/so-elastic-fleet-common old mode 100755 new mode 100644 index 6ada43003..c0b4db53a --- a/salt/elasticfleet/tools/sbin/so-elastic-fleet-common +++ b/salt/elasticfleet/tools/sbin/so-elastic-fleet-common @@ -42,6 +42,23 @@ elastic_fleet_integration_create() { curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" } + +elastic_fleet_integration_remove() { + + AGENT_POLICY=$1 + + NAME=$2 + + INTEGRATION_ID=$(/usr/sbin/so-elastic-fleet-agent-policy-view "$AGENT_POLICY" | jq -r '.item.package_policies[] | select(.name=="'"$NAME"'") | .id') + + JSON_STRING=$( jq -n \ + --arg INTEGRATIONID "$INTEGRATION_ID" \ + '{"packagePolicyIds":[$INTEGRATIONID]}' + ) + + curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies/delete" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" +} + elastic_fleet_integration_update() { UPDATE_ID=$1 @@ -98,3 +115,4 @@ elastic_fleet_policy_update() { curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/agent_policies/$POLICYID" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" } + diff --git a/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-elastic-defend b/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-elastic-defend new file mode 100755 index 000000000..c4a7d39fd --- /dev/null +++ b/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-elastic-defend @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion 
Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +# Usage: Run with --force to update the Elastic Defend integration policy + +. /usr/sbin/so-elastic-fleet-common + +# Manage Elastic Defend Integration for Initial Endpoints Policy +for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/elastic-defend/*.json +do + printf "\n\nInitial Endpoints Policy - Loading $INTEGRATION\n" + elastic_fleet_integration_check "endpoints-initial" "$INTEGRATION" + if [ -n "$INTEGRATION_ID" ]; then + if [ "$1" = "--force" ]; then + printf "\n\nIntegration $NAME exists - Updating integration\n" + elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION" + else + printf "\n\nIntegration $NAME exists - Not updating - rerun with --force to force the update.\n" + fi + else + printf "\n\nIntegration does not exist - Creating integration\n" + elastic_fleet_integration_create "@$INTEGRATION" + fi +done diff --git a/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load b/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load old mode 100755 new mode 100644 index ae0fbb6ba..44e7ccf2b --- a/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load +++ b/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load @@ -12,6 +12,9 @@ if [ ! 
-f /opt/so/state/eaintegrations.txt ]; then # First, check for any package upgrades /usr/sbin/so-elastic-fleet-package-upgrade + # Second, configure Elastic Defend Integration seperately + /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend + # Initial Endpoints for INTEGRATION in /opt/so/conf/elastic-fleet/integrations/endpoints-initial/*.json do @@ -65,3 +68,4 @@ else exit $RETURN_CODE fi + From ab4c5acd0c905bad8cdc6acddf41d26dd04de00e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 12 Oct 2023 09:28:07 -0400 Subject: [PATCH 182/417] update bootstrap-salt.sh with stable branch --- salt/salt/scripts/bootstrap-salt.sh | 2074 +++++- salt/salt/scripts/bootstrap-salt_orig.sh | 8126 ++++++++++++++++++++++ 2 files changed, 9976 insertions(+), 224 deletions(-) create mode 100644 salt/salt/scripts/bootstrap-salt_orig.sh diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index 47d25949c..ace3bce26 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -14,7 +14,7 @@ # # BUGS: https://github.com/saltstack/salt-bootstrap/issues # -# COPYRIGHT: (c) 2012-2021 by the SaltStack Team, see AUTHORS.rst for more +# COPYRIGHT: (c) 2012-2022 by the SaltStack Team, see AUTHORS.rst for more # details. 
# # LICENSE: Apache 2.0 @@ -23,7 +23,7 @@ #====================================================================================================================== set -o nounset # Treat unset variables as an error -__ScriptVersion="2021.09.17" +__ScriptVersion="2023.08.03" __ScriptName="bootstrap-salt.sh" __ScriptFullName="$0" @@ -224,7 +224,6 @@ _KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE} _TEMP_CONFIG_DIR="null" _SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git" _SALT_REPO_URL=${_SALTSTACK_REPO_URL} -_DOWNSTREAM_PKG_REPO=$BS_FALSE _TEMP_KEYS_DIR="null" _SLEEP="${__DEFAULT_SLEEP}" _INSTALL_MASTER=$BS_FALSE @@ -268,6 +267,8 @@ _CUSTOM_MASTER_CONFIG="null" _CUSTOM_MINION_CONFIG="null" _QUIET_GIT_INSTALLATION=$BS_FALSE _REPO_URL="repo.saltproject.io" +_ONEDIR_DIR="salt" +_ONEDIR_NIGHTLY_DIR="salt-dev/${_ONEDIR_DIR}" _PY_EXE="python3" _INSTALL_PY="$BS_FALSE" _TORNADO_MAX_PY3_VERSION="5.0" @@ -275,6 +276,9 @@ _POST_NEON_INSTALL=$BS_FALSE _MINIMUM_PIP_VERSION="9.0.1" _MINIMUM_SETUPTOOLS_VERSION="9.1" _POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr" +_PIP_DOWNLOAD_ARGS="" +_QUICK_START="$BS_FALSE" +_AUTO_ACCEPT_MINION_KEYS="$BS_FALSE" # Defaults for install arguments ITYPE="stable" @@ -290,110 +294,130 @@ __usage() { Usage : ${__ScriptName} [options] [install-type-args] Installation types: - - stable Install latest stable release. This is the default - install type - - stable [branch] Install latest version on a branch. Only supported - for packages available at repo.saltproject.io - - stable [version] Install a specific version. Only supported for - packages available at repo.saltproject.io - To pin a 3xxx minor version, specify it as 3xxx.0 - - testing RHEL-family specific: configure EPEL testing repo - - git Install from the head of the master branch - - git [ref] Install from any git ref (such as a branch, tag, or - commit) + - stable Install latest stable release. This is the default + install type + - stable [branch] Install latest version on a branch. 
Only supported + for packages available at repo.saltproject.io + - stable [version] Install a specific version. Only supported for + packages available at repo.saltproject.io + To pin a 3xxx minor version, specify it as 3xxx.0 + - testing RHEL-family specific: configure EPEL testing repo + - git Install from the head of the master branch + - git [ref] Install from any git ref (such as a branch, tag, or + commit) + - onedir Install latest onedir release. + - onedir [version] Install a specific version. Only supported for + onedir packages available at repo.saltproject.io + + - onedir_rc Install latest onedir RC release. + - onedir_rc [version] Install a specific version. Only supported for + onedir RC packages available at repo.saltproject.io + - old-stable Install latest old stable release. + - old-stable [branch] Install latest version on a branch. Only supported + for packages available at repo.saltproject.io + - old-stable [version] Install a specific version. Only supported for + packages available at repo.saltproject.io + To pin a 3xxx minor version, specify it as 3xxx.0 Examples: - ${__ScriptName} - ${__ScriptName} stable - - ${__ScriptName} stable 2017.7 - - ${__ScriptName} stable 2017.7.2 + - ${__ScriptName} stable 3006 + - ${__ScriptName} stable 3006.1 - ${__ScriptName} testing - ${__ScriptName} git - ${__ScriptName} git 2017.7 - ${__ScriptName} git v2017.7.2 - ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358 + - ${__ScriptName} onedir + - ${__ScriptName} onedir 3006 + - ${__ScriptName} onedir_rc + - ${__ScriptName} onedir_rc 3006 + - ${__ScriptName} old-stable + - ${__ScriptName} old-stable 3005 + - ${__ScriptName} old-stable 3005.1 + Options: - -h Display this message - -v Display script version - -n No colours - -D Show debug output + -a Pip install all Python pkg dependencies for Salt. Requires -V to install + all pip pkgs into the virtualenv. + (Only available for Ubuntu based distributions) + -A Pass the salt-master DNS name or IP. 
This will be stored under + \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf + -b Assume that dependencies are already installed and software sources are + set up. If git is selected, git tree is still checked out as dependency + step. -c Temporary configuration directory - -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} - -w Install packages from downstream package repository rather than - upstream, saltstack package repository. This is currently only - implemented for SUSE. - -k Temporary directory holding the minion keys which will pre-seed - the master. - -s Sleep time used when waiting for daemons to start, restart and when - checking for the services running. Default: ${__DEFAULT_SLEEP} - -L Also install salt-cloud and required python-libcloud package - -M Also install salt-master - -S Also install salt-syndic - -N Do not install salt-minion - -X Do not start daemons after installation - -d Disables checking if Salt services are enabled to start on system boot. - You can also do this by touching /tmp/disable_salt_checks on the target - host. Default: \${BS_FALSE} - -P Allow pip based installations. On some distributions the required salt - packages or its dependencies are not available as a package for that - distribution. Using this flag allows the script to use pip as a last - resort method. NOTE: This only works for functions which actually - implement pip based installations. - -U If set, fully upgrade the system prior to bootstrapping Salt - -I If set, allow insecure connections while downloading any files. For - example, pass '--no-check-certificate' to 'wget' or '--insecure' to - 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining - GnuPG archive keys insecurely if distro has changed release signatures. - -F Allow copied files to overwrite existing (config, init.d, etc) - -K If set, keep the temporary files in the temporary directories specified - with -c and -k -C Only run the configuration function. 
Implies -F (forced overwrite). To overwrite Master or Syndic configs, -M or -S, respectively, must also be specified. Salt installation will be ommitted, but some of the dependencies could be installed to write configuration with -j or -J. - -A Pass the salt-master DNS name or IP. This will be stored under - \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf - -i Pass the salt-minion id. This will be stored under - \${BS_SALT_ETC_DIR}/minion_id - -p Extra-package to install while installing Salt dependencies. One package - per -p flag. You are responsible for providing the proper package name. - -H Use the specified HTTP proxy for all download URLs (including https://). - For example: http://myproxy.example.com:3128 - -b Assume that dependencies are already installed and software sources are - set up. If git is selected, git tree is still checked out as dependency - step. + -d Disables checking if Salt services are enabled to start on system boot. + You can also do this by touching /tmp/disable_salt_checks on the target + host. Default: \${BS_FALSE} + -D Show debug output -f Force shallow cloning for git installations. This may result in an "n/a" in the version number. - -l Disable ssl checks. When passed, switches "https" calls to "http" where - possible. - -V Install Salt into virtualenv - (only available for Ubuntu based distributions) - -a Pip install all Python pkg dependencies for Salt. Requires -V to install - all pip pkgs into the virtualenv. - (Only available for Ubuntu based distributions) - -r Disable all repository configuration performed by this script. This - option assumes all necessary repository configuration is already present - on the system. - -R Specify a custom repository URL. Assumes the custom repository URL - points to a repository that mirrors Salt packages located at - repo.saltproject.io. The option passed with -R replaces the - "repo.saltproject.io". If -R is passed, -r is also set. 
Currently only - works on CentOS/RHEL and Debian based distributions. - -J Replace the Master config file with data passed in as a JSON string. If - a Master config file is found, a reasonable effort will be made to save - the file with a ".bak" extension. If used in conjunction with -C or -F, - no ".bak" file will be created as either of those options will force - a complete overwrite of the file. + -F Allow copied files to overwrite existing (config, init.d, etc) + -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} + -h Display this message + -H Use the specified HTTP proxy for all download URLs (including https://). + For example: http://myproxy.example.com:3128 + -i Pass the salt-minion id. This will be stored under + \${BS_SALT_ETC_DIR}/minion_id + -I If set, allow insecure connections while downloading any files. For + example, pass '--no-check-certificate' to 'wget' or '--insecure' to + 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining + GnuPG archive keys insecurely if distro has changed release signatures. -j Replace the Minion config file with data passed in as a JSON string. If a Minion config file is found, a reasonable effort will be made to save the file with a ".bak" extension. If used in conjunction with -C or -F, no ".bak" file will be created as either of those options will force a complete overwrite of the file. + -J Replace the Master config file with data passed in as a JSON string. If + a Master config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -k Temporary directory holding the minion keys which will pre-seed + the master. + -K If set, keep the temporary files in the temporary directories specified + with -c and -k + -l Disable ssl checks. When passed, switches "https" calls to "http" where + possible. 
+ -L Also install salt-cloud and required python-libcloud package + -M Also install salt-master + -n No colours + -N Do not install salt-minion + -p Extra-package to install while installing Salt dependencies. One package + per -p flag. You are responsible for providing the proper package name. + -P Allow pip based installations. On some distributions the required salt + packages or its dependencies are not available as a package for that + distribution. Using this flag allows the script to use pip as a last + resort method. NOTE: This only works for functions which actually + implement pip based installations. -q Quiet salt installation from git (setup.py install -q) + -Q Quickstart, install the Salt master and the Salt minion. + And automatically accept the minion key. + -R Specify a custom repository URL. Assumes the custom repository URL + points to a repository that mirrors Salt packages located at + repo.saltproject.io. The option passed with -R replaces the + "repo.saltproject.io". If -R is passed, -r is also set. Currently only + works on CentOS/RHEL and Debian based distributions and macOS. + -s Sleep time used when waiting for daemons to start, restart and when + checking for the services running. Default: ${__DEFAULT_SLEEP} + -S Also install salt-syndic + -r Disable all repository configuration performed by this script. This + option assumes all necessary repository configuration is already present + on the system. + -U If set, fully upgrade the system prior to bootstrapping Salt + -v Display script version + -V Install Salt into virtualenv + (only available for Ubuntu based distributions) -x Changes the Python version used to install Salt. For CentOS 6 git installations python2.7 is supported. - Fedora git installation, CentOS 7, Debian 9, Ubuntu 16.04 and 18.04 support python3. + Fedora git installation, CentOS 7, Ubuntu 18.04 support python3. + -X Do not start daemons after installation -y Installs a different python version on host. 
Currently this has only been tested with CentOS 6 and is considered experimental. This will install the ius repo on the box if disable repo is false. This must be used in conjunction @@ -406,7 +430,7 @@ EOT } # ---------- end of function __usage ---------- -while getopts ':hvnDc:g:Gyx:wk:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aq' opt +while getopts ':hvnDc:g:Gyx:k:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aqQ' opt do case "${opt}" in @@ -422,7 +446,6 @@ do echowarn "No need to provide this option anymore, now it is a default behavior." ;; - w ) _DOWNSTREAM_PKG_REPO=$BS_TRUE ;; k ) _TEMP_KEYS_DIR="$OPTARG" ;; s ) _SLEEP=$OPTARG ;; M ) _INSTALL_MASTER=$BS_TRUE ;; @@ -451,6 +474,7 @@ do J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;; j ) _CUSTOM_MINION_CONFIG=$OPTARG ;; q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;; + Q ) _QUICK_START=$BS_TRUE ;; x ) _PY_EXE="$OPTARG" ;; y ) _INSTALL_PY="$BS_TRUE" ;; @@ -572,7 +596,6 @@ fi echoinfo "Running version: ${__ScriptVersion}" echoinfo "Executed by: ${CALLER}" echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" -#echowarn "Running the unstable version of ${__ScriptName}" # Define installation type if [ "$#" -gt 0 ];then @@ -582,7 +605,7 @@ if [ "$#" -gt 0 ];then fi # Check installation type -if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git)')" = "" ]; then +if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git|onedir|onedir_rc|old-stable)')" = "" ]; then echoerror "Installation type \"$ITYPE\" is not known..." 
exit 1 fi @@ -602,23 +625,123 @@ if [ "$ITYPE" = "git" ]; then # If doing stable install, check if version specified elif [ "$ITYPE" = "stable" ]; then if [ "$#" -eq 0 ];then - STABLE_REV="latest" + ONEDIR_REV="latest" + _ONEDIR_REV="latest" + ITYPE="onedir" else - if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2|3000|3001|3002|3003|3004)$')" != "" ]; then - STABLE_REV="$1" + if [ "$(echo "$1" | grep -E '^(nightly|latest|3005|3006)$')" != "" ]; then + ONEDIR_REV="$1" + _ONEDIR_REV="$1" + ITYPE="onedir" shift - elif [ "$(echo "$1" | grep -E '^(2[0-9]*\.[0-9]*\.[0-9]*|[3-9][0-9]{3}(\.[0-9]*)?)$')" != "" ]; then + elif [ "$(echo "$1" | grep -E '^([3-9][0-5]{2}[5-9](\.[0-9]*)?)')" != "" ]; then + ONEDIR_REV="minor/$1" + _ONEDIR_REV="$1" + ITYPE="onedir" + shift + else + echo "Unknown stable version: $1 (valid: 3005, 3006, latest)" + exit 1 + fi + fi + +# If doing old-stable install, check if version specified +elif [ "$ITYPE" = "old-stable" ]; then + if [ "$#" -eq 0 ];then + ITYPE="stable" + else + if [ "$(echo "$1" | grep -E '^(3003|3004|3005)$')" != "" ]; then + STABLE_REV="$1" + ITYPE="stable" + shift + elif [ "$(echo "$1" | grep -E '^([3-9][0-5]{3}(\.[0-9]*)?)$')" != "" ]; then # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix + ITYPE="stable" STABLE_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') if [ "$(uname)" != "Darwin" ]; then STABLE_REV="archive/$STABLE_REV" fi shift else - echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, 3000, 3001, 3002, 3003, 3004, latest, \$MAJOR.\$MINOR.\$PATCH until 2019.2, \$MAJOR or \$MAJOR.\$PATCH starting from 3000)" + echo "Unknown old stable version: $1 (valid: 3003, 3004, 3005)" exit 1 fi fi + +elif [ "$ITYPE" = "onedir" ]; then + if [ "$#" -eq 0 ];then + ONEDIR_REV="latest" + else + if [ "$(echo "$1" | grep -E 
'^(nightly|latest|3005|3006)$')" != "" ]; then + ONEDIR_REV="$1" + shift + elif [ "$(echo "$1" | grep -E '^(3005(\.[0-9]*)?)')" != "" ]; then + # Handle the 3005.0 version as 3005 archive (pin to minor) and strip the fake ".0" suffix + ONEDIR_REV=$(echo "$1" | sed -E 's/^(3005)\.0$/\1/') + ONEDIR_REV="minor/$ONEDIR_REV" + shift + elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}(\.[0-9]*)?)')" != "" ]; then + ONEDIR_REV="minor/$1" + shift + else + echo "Unknown onedir version: $1 (valid: 3005, 3006, latest, nightly.)" + exit 1 + fi + fi + +elif [ "$ITYPE" = "onedir_rc" ]; then + # Change the _ONEDIR_DIR to be the location for the RC packages + _ONEDIR_DIR="salt_rc/salt" + + # Change ITYPE to onedir so we use the regular onedir functions + ITYPE="onedir" + + if [ "$#" -eq 0 ];then + ONEDIR_REV="latest" + else + if [ "$(echo "$1" | grep -E '^(latest)$')" != "" ]; then + ONEDIR_REV="$1" + shift + elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}?rc[0-9]-[0-9]$)')" != "" ]; then + # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix + #ONEDIR_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') + ONEDIR_REV="minor/$1" + shift + elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}\.[0-9]?rc[0-9]$)')" != "" ]; then + # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix + #ONEDIR_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') + ONEDIR_REV="minor/$1" + shift + else + echo "Unknown onedir_rc version: $1 (valid: 3005-1, latest.)" + exit 1 + fi + fi +fi + +# Doing a quick start, so install master +# set master address to 127.0.0.1 +if [ "$_QUICK_START" -eq "$BS_TRUE" ]; then + # make install type is stable + ITYPE="stable" + + # make sure the revision is latest + STABLE_REV="latest" + ONEDIR_REV="latest" + + # make sure we're installing the master + _INSTALL_MASTER=$BS_TRUE + + # override incase install minion + # is set to false + _INSTALL_MINION=$BS_TRUE + + # Set master address to loopback IP + 
_SALT_MASTER_ADDRESS="127.0.0.1" + + # Auto accept the minion key + # when the install is done. + _AUTO_ACCEPT_MINION_KEYS=$BS_TRUE fi # Check for any unparsed arguments. Should be an error. @@ -824,6 +947,18 @@ __fetch_verify() { return 1 } +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_url_exists +# DESCRIPTION: Checks if a URL exists +#---------------------------------------------------------------------------------------------------------------------- +__check_url_exists() { + _URL="$1" + if curl --output /dev/null --silent --fail "${_URL}"; then + return 0 + else + return 1 + fi +} #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __gather_hardware_info # DESCRIPTION: Discover hardware information @@ -945,7 +1080,7 @@ __strip_duplicates() { __sort_release_files() { KNOWN_RELEASE_FILES=$(echo "(arch|alpine|centos|debian|ubuntu|fedora|redhat|suse|\ mandrake|mandriva|gentoo|slackware|turbolinux|unitedlinux|void|lsb|system|\ - oracle|os)(-|_)(release|version)" | sed -E 's:[[:space:]]::g') + oracle|os|almalinux|rocky)(-|_)(release|version)" | sed -E 's:[[:space:]]::g') primary_release_files="" secondary_release_files="" # Sort know VS un-known files first @@ -959,7 +1094,7 @@ __sort_release_files() { done # Now let's sort by know files importance, max important goes last in the max_prio list - max_prio="redhat-release centos-release oracle-release fedora-release" + max_prio="redhat-release centos-release oracle-release fedora-release almalinux-release rocky-release" for entry in $max_prio; do if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\2 \\1 \\3:g") @@ -1028,6 +1163,8 @@ __gather_linux_system_info() { elif [ "${DISTRO_NAME}" = "Arch" ]; then DISTRO_NAME="Arch Linux" return + 
elif [ "${DISTRO_NAME}" = "Rocky" ]; then + DISTRO_NAME="Rocky Linux" fi rv=$(lsb_release -sr) [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv") @@ -1086,6 +1223,8 @@ __gather_linux_system_info() { unitedlinux ) n="UnitedLinux" ;; void ) n="VoidLinux" ;; oracle ) n="Oracle Linux" ;; + almalinux ) n="AlmaLinux" ;; + rocky ) n="Rocky Linux" ;; system ) while read -r line; do [ "${n}x" != "systemx" ] && break @@ -1308,7 +1447,7 @@ __gather_system_info() { #---------------------------------------------------------------------------------------------------------------------- # shellcheck disable=SC2034 __ubuntu_derivatives_translation() { - UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon)" + UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon|pop)" # Mappings trisquel_6_ubuntu_base="12.04" linuxmint_13_ubuntu_base="12.04" @@ -1321,6 +1460,8 @@ __ubuntu_derivatives_translation() { neon_16_ubuntu_base="16.04" neon_18_ubuntu_base="18.04" neon_20_ubuntu_base="20.04" + neon_22_ubuntu_base="22.04" + pop_22_ubuntu_base="22.04" # Translate Ubuntu derivatives to their base Ubuntu version match=$(echo "$DISTRO_NAME_L" | grep -E ${UBUNTU_DERIVATIVES}) @@ -1380,9 +1521,13 @@ __check_dpkg_architecture() { if [ "$_CUSTOM_REPO_URL" != "null" ]; then warn_msg="Support for arm64 is experimental, make sure the custom repository used has the expected structure and contents." 
else - # Saltstack official repository does not yet have arm64 metadata, - # use amd64 repositories on arm64, since all pkgs are arch-independent - __REPO_ARCH="amd64" + # Saltstack official repository has arm64 metadata beginning with Debian 11, + # use amd64 repositories on arm64 for anything older, since all pkgs are arch-independent + if [ "$DISTRO_NAME_L" = "debian" ] || [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then + __REPO_ARCH="amd64" + else + __REPO_ARCH="arm64" + fi __REPO_ARCH_DEB="deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg arch=$__REPO_ARCH]" warn_msg="Support for arm64 packages is experimental and might rely on architecture-independent packages from the amd64 repository." fi @@ -1462,6 +1607,9 @@ __ubuntu_codename_translation() { "21") DISTRO_CODENAME="hirsute" ;; + "22") + DISTRO_CODENAME="jammy" + ;; *) DISTRO_CODENAME="trusty" ;; @@ -1488,10 +1636,12 @@ __debian_derivatives_translation() { devuan_1_debian_base="8.0" devuan_2_debian_base="9.0" kali_1_debian_base="7.0" + kali_2021_debian_base="10.0" linuxmint_1_debian_base="8.0" raspbian_8_debian_base="8.0" raspbian_9_debian_base="9.0" raspbian_10_debian_base="10.0" + raspbian_11_debian_base="11.0" bunsenlabs_9_debian_base="9.0" turnkey_9_debian_base="9.0" @@ -1590,11 +1740,13 @@ __check_end_of_life_versions() { # = 17.04, 17.10 # = 18.10 # = 19.04, 19.10 + # = 20.10 if [ "$DISTRO_MAJOR_VERSION" -lt 16 ] || \ [ "$DISTRO_MAJOR_VERSION" -eq 17 ] || \ [ "$DISTRO_MAJOR_VERSION" -eq 19 ] || \ { [ "$DISTRO_MAJOR_VERSION" -eq 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then + { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then echoerror "End of life distributions are not supported." echoerror "Please consider upgrading to the next stable. 
See:" echoerror " https://wiki.ubuntu.com/Releases" @@ -1812,14 +1964,14 @@ elif [ "${DISTRO_NAME_L}" = "debian" ]; then __debian_codename_translation fi -if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|gentoo|red_hat|oracle|scientific|amazon|fedora|macosx)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then +if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|gentoo|red_hat|oracle|scientific|amazon|fedora|macosx|almalinux|rocky)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then echoerror "${DISTRO_NAME} does not have major version pegged packages support" exit 1 fi # Only RedHat based distros have testing support if [ "${ITYPE}" = "testing" ]; then - if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle)')" = "" ]; then + if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle|almalinux|rocky)')" = "" ]; then echoerror "${DISTRO_NAME} does not have testing packages support" exit 1 fi @@ -1850,10 +2002,6 @@ if [ "$ITYPE" = "git" ]; then if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then _POST_NEON_INSTALL=$BS_TRUE __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" - if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then - # We do this to properly clone tags - GIT_REV="v${GIT_REV}" - fi echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" else __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?[0-9]{1,4}\.[0-9]{1,2})(\.[0-9]{1,2})?.*$/MATCH/') @@ -1865,10 +2013,6 @@ if [ "$ITYPE" = "git" ]; then if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then _POST_NEON_INSTALL=$BS_TRUE __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" - if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then - # We do this to properly clone tags - GIT_REV="v${GIT_REV}" - fi echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" else __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?[[:digit:]]\{1,4\}\.[[:digit:]]\{1,2\}\)\(\.[[:digit:]]\{1,2\}\)\?.*$/MATCH/') @@ -2031,20 +2175,13 
@@ __rpm_import_gpg() { #---------------------------------------------------------------------------------------------------------------------- __yum_install_noinput() { - ENABLE_EPEL_CMD="" - # Skip Amazon Linux for the first round, since EPEL is no longer required. - # See issue #724 - if [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DISTRO_NAME_L" != "amazon_linux_ami" ]; then - ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" - fi - if [ "$DISTRO_NAME_L" = "oracle_linux" ]; then # We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!! for package in "${@}"; do - yum -y install "${package}" || yum -y install "${package}" ${ENABLE_EPEL_CMD} || return $? + yum -y install "${package}" || yum -y install "${package}" || return $? done else - yum -y install "${@}" ${ENABLE_EPEL_CMD} || return $? + yum -y install "${@}" || return $? fi } # ---------- end of function __yum_install_noinput ---------- @@ -2057,6 +2194,15 @@ __dnf_install_noinput() { dnf -y install "${@}" || return $? } # ---------- end of function __dnf_install_noinput ---------- +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __tdnf_install_noinput +# DESCRIPTION: (DRY) dnf install with noinput options +#---------------------------------------------------------------------------------------------------------------------- +__tdnf_install_noinput() { + + tdnf -y install "${@}" || return $? +} # ---------- end of function __tdnf_install_noinput ---------- + #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __git_clone_and_checkout # DESCRIPTION: (DRY) Helper function to clone and checkout salt to a @@ -2582,7 +2728,7 @@ __activate_virtualenv() { # NAME: __install_pip_pkgs # DESCRIPTION: Return 0 or 1 if successfully able to install pip packages. Can provide a different python version to # install pip packages with. 
If $py_ver is not specified it will use the default python version. -# PARAMETERS: pkgs, py_ver +# PARAMETERS: pkgs, py_ver, upgrade #---------------------------------------------------------------------------------------------------------------------- __install_pip_pkgs() { @@ -2751,15 +2897,15 @@ EOM fi echodebug "Running '${_pip_cmd} install wheel ${_setuptools_dep}'" - ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} wheel "${_setuptools_dep}" + ${_pip_cmd} install --upgrade ${_POST_NEON_PIP_INSTALL_ARGS} wheel "${_setuptools_dep}" echoinfo "Installing salt using ${_py_exe}" cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 mkdir /tmp/git/deps echoinfo "Downloading Salt Dependencies from PyPi" - echodebug "Running '${_pip_cmd} download -d /tmp/git/deps .'" - ${_pip_cmd} download -d /tmp/git/deps . || (echo "Failed to download salt dependencies" && return 1) + echodebug "Running '${_pip_cmd} download -d /tmp/git/deps ${_PIP_DOWNLOAD_ARGS} .'" + ${_pip_cmd} download -d /tmp/git/deps ${_PIP_DOWNLOAD_ARGS} . || (echo "Failed to download salt dependencies" && return 1) echoinfo "Installing Downloaded Salt Dependencies" echodebug "Running '${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/*'" @@ -2918,7 +3064,8 @@ __enable_universe_repository() { __install_saltstack_ubuntu_repository() { # Workaround for latest non-LTS Ubuntu if { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 21 ] && [ "$DISTRO_MINOR_VERSION" -eq 04 ]; }; then + # remove 22 version when salt packages for 22.04 are available + [ "$DISTRO_MAJOR_VERSION" -eq 21 ] || [ "$DISTRO_MAJOR_VERSION" -eq 22 ]; then echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." 
UBUNTU_VERSION=20.04 UBUNTU_CODENAME="focal" @@ -2957,6 +3104,58 @@ __install_saltstack_ubuntu_repository() { __wait_for_apt apt-get update || return 1 } +__install_saltstack_ubuntu_onedir_repository() { + # Workaround for latest non-LTS Ubuntu + if { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ + [ "$DISTRO_MAJOR_VERSION" -eq 21 ]; then + echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." + UBUNTU_VERSION=20.04 + UBUNTU_CODENAME="focal" + else + UBUNTU_VERSION=${DISTRO_VERSION} + UBUNTU_CODENAME=${DISTRO_CODENAME} + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Ubuntu 18+ + if [ "$DISTRO_MAJOR_VERSION" -ge 18 ]; then + __PACKAGES="${__PACKAGES} gnupg" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # SaltStack's stable Ubuntu repository: + SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${ONEDIR_REV}/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/" + fi + echo "$__REPO_ARCH_DEB $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/salt.list + + if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ]; then + __apt_key_fetch "${SALTSTACK_UBUNTU_URL}salt-archive-keyring.gpg" || return 1 + elif [ "$(echo "${ONEDIR_REV}" | grep -E '(latest|nightly)')" != "" ]; then + __apt_key_fetch 
"${SALTSTACK_UBUNTU_URL}salt-archive-keyring.gpg" || \ + __apt_key_fetch "${SALTSTACK_UBUNTU_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 + else + __apt_key_fetch "${SALTSTACK_UBUNTU_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 + fi + + __wait_for_apt apt-get update || return 1 +} + install_ubuntu_deps() { if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then # Install add-apt-repository @@ -3032,7 +3231,7 @@ install_ubuntu_stable_deps() { if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ] || [ "$DISTRO_MAJOR_VERSION" -ge 22 ]; then __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 else __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && @@ -3113,6 +3312,9 @@ install_ubuntu_git_deps() { fi else __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + if [ "$DISTRO_MAJOR_VERSION" -ge 22 ]; then + __PACKAGES="${__PACKAGES} g++" + fi # shellcheck disable=SC2086 __apt_get_install_noinput ${__PACKAGES} || return 1 fi @@ -3126,6 +3328,44 @@ install_ubuntu_git_deps() { return 0 } +install_ubuntu_onedir_deps() { + if [ "${_SLEEP}" -eq "${__DEFAULT_SLEEP}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # The user did not pass a custom sleep value as an argument, let's increase the default value + echodebug "On Ubuntu systems we increase the default sleep value to 10." + echodebug "See https://github.com/saltstack/salt/issues/12248 for more info." + _SLEEP=10 + fi + + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." 
+ fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 + else + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_ubuntu_onedir_repository || return 1 + fi + + install_ubuntu_deps || return 1 +} + install_ubuntu_stable() { __PACKAGES="" @@ -3170,7 +3410,15 @@ install_ubuntu_git() { _POST_NEON_PIP_INSTALL_ARGS="" __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service + + # Account for new path for services files in later releases + if [ -d "pkg/common" ]; then + _SERVICE_DIR="pkg/common" + else + _SERVICE_DIR="pkg" + fi + + sed -i 's:/usr/bin:/usr/local/bin:g' ${_SERVICE_DIR}/*.service return 0 fi @@ -3185,6 +3433,28 @@ install_ubuntu_git() { return 0 } +install_ubuntu_onedir() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + install_ubuntu_stable_post() { for fname in api master 
minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -3220,8 +3490,15 @@ install_ubuntu_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" + fi + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -3236,8 +3513,8 @@ install_ubuntu_git_post() { if [ ! 
-f $_upstart_conf ]; then # upstart does not know about our service, let's copy the proper file echowarn "Upstart does not appear to know about salt-$fname" - echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart to $_upstart_conf" - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.upstart" "$_upstart_conf" + echodebug "Copying ${_SERVICE_DIR}/salt-$fname.upstart to $_upstart_conf" + __copyfile "${_SERVICE_DIR}/salt-${fname}.upstart" "$_upstart_conf" # Set service to know about virtualenv if [ "${_VIRTUALENV_DIR}" != "null" ]; then echo "SALT_USE_VIRTUALENV=${_VIRTUALENV_DIR}" > /etc/default/salt-${fname} @@ -3349,17 +3626,8 @@ install_ubuntu_check_services() { # Debian Install Functions # __install_saltstack_debian_repository() { - if [ "$DISTRO_MAJOR_VERSION" -eq 11 ]; then - # Packages for Debian 11 at repo.saltproject.io are not yet available - # Set up repository for Debian 10 for Debian 11 for now until support - # is available at repo.saltproject.io for Debian 11. - echowarn "Debian 11 distribution detected, but stable packages requested. Trying packages from Debian 10. You may experience problems." 
- DEBIAN_RELEASE="10" - DEBIAN_CODENAME="buster" - else - DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" - DEBIAN_CODENAME="$DISTRO_CODENAME" - fi + DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" + DEBIAN_CODENAME="$DISTRO_CODENAME" __PY_VERSION_REPO="apt" if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then @@ -3391,6 +3659,50 @@ __install_saltstack_debian_repository() { __wait_for_apt apt-get update || return 1 } +__install_saltstack_debian_onedir_repository() { + DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" + DEBIAN_CODENAME="$DISTRO_CODENAME" + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Debian 9+ + if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then + __PACKAGES="${__PACKAGES} gnupg2" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location + SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${ONEDIR_REV}/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/" + fi + echo "$__REPO_ARCH_DEB $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/salt.list" + + if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ]; then + __apt_key_fetch "${SALTSTACK_DEBIAN_URL}salt-archive-keyring.gpg" || return 1 + elif [ "$(echo "${ONEDIR_REV}" | grep -E '(latest|nightly)')" != "" ]; then + __apt_key_fetch "${SALTSTACK_DEBIAN_URL}salt-archive-keyring.gpg" || \ + __apt_key_fetch 
"${SALTSTACK_DEBIAN_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 + else + __apt_key_fetch "${SALTSTACK_DEBIAN_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 + fi + + __wait_for_apt apt-get update || return 1 +} + install_debian_deps() { if [ $_START_DAEMONS -eq $BS_FALSE ]; then echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." @@ -3444,6 +3756,59 @@ install_debian_deps() { return 0 } +install_debian_onedir_deps() { + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." + fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + # Try to update GPG keys first if allowed + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 10 ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 + else + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + # Additionally install procps and pciutils which allows for Docker bootstraps. 
See 366#issuecomment-39666813 + __PACKAGES='procps pciutils' + + # YAML module is used for generating custom master/minion configs + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_debian_onedir_repository || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + install_debian_git_pre() { if ! __check_command_exists git; then __apt_get_install_noinput git || return 1 @@ -3692,7 +4057,15 @@ install_debian_git() { _POST_NEON_PIP_INSTALL_ARGS="" __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service + + # Account for new path for services files in later releases + if [ -d "pkg/common" ]; then + _SERVICE_DIR="pkg/common" + else + _SERVICE_DIR="pkg" + fi + + sed -i 's:/usr/bin:/usr/local/bin:g' ${_SERVICE_DIR}/*.service return 0 fi @@ -3720,6 +4093,28 @@ install_debian_9_git() { return 0 } +install_debian_onedir() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + install_debian_git_post() { for fname in api master minion syndic; do # Skip if not meant to be installed @@ -3729,16 +4124,23 @@ 
install_debian_git_post() { [ "$fname" = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ "$fname" = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" + fi + # Configure SystemD for Debian 8 "Jessie" and later if [ -f /bin/systemctl ]; then if [ ! -f /lib/systemd/system/salt-${fname}.service ] || \ { [ -f /lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]; }; then - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}" + if [ -f "${_SERVICE_DIR}/salt-${fname}.service" ]; then + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SERVICE_DIR}/salt-${fname}.environment" "/etc/default/salt-${fname}" else # workaround before adding Debian-specific unit files to the Salt main repo - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" /lib/systemd/system sed -i -e '/^Type/ s/notify/simple/' /lib/systemd/system/salt-${fname}.service fi fi @@ -3770,6 +4172,13 @@ install_debian_git_post() { done } +install_debian_2021_post() { + # Kali 2021 (debian derivative) disables all network services by default + # Using archlinux post function to enable salt systemd services + install_arch_linux_post || return 1 + return 0 +} + install_debian_restart_daemons() { [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 @@ -3826,6 +4235,41 @@ install_debian_check_services() { # Fedora Install Functions # +__install_saltstack_fedora_onedir_repository() { + if [ "$ITYPE" = 
"stable" ]; then + REPO_REV="$ONEDIR_REV" + else + REPO_REV="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + GPG_KEY="SALT-PROJECT-GPG-PUBKEY-2023.pub" + + REPO_FILE="/etc/yum.repos.d/salt.repo" + + if [ ! -s "$REPO_FILE" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/fedora/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/fedora/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" + fi + + __fetch_url "${REPO_FILE}" "${FETCH_URL}.repo" + + __rpm_import_gpg "${FETCH_URL}/${GPG_KEY}" || return 1 + + yum clean metadata || return 1 + elif [ "$REPO_REV" != "latest" ]; then + echowarn "salt.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $REPO_REV." + fi + + return 0 +} + install_fedora_deps() { if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then dnf -y update || return 1 @@ -3985,6 +4429,9 @@ install_fedora_git_deps() { done else __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + if [ "${DISTRO_VERSION}" -ge 35 ]; then + __PACKAGES="${__PACKAGES} gcc-c++" + fi # shellcheck disable=SC2086 __dnf_install_noinput ${__PACKAGES} || return 1 fi @@ -4028,7 +4475,18 @@ install_fedora_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" + 
fi + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Salt executables are located under `/usr/local/bin/` on Fedora 36+ + #if [ "${DISTRO_VERSION}" -ge 36 ]; then + # sed -i -e 's:/usr/bin/:/usr/local/bin/:g' /lib/systemd/system/salt-*.service + #fi # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -4076,6 +4534,83 @@ install_fedora_check_services() { return 0 } + +install_fedora_onedir_deps() { + + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_saltstack_fedora_onedir_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. This call has + # its own check in case -r was passed without -R. 
+ if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_fedora_onedir_repository || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="dnf-utils chkconfig" + else + __PACKAGES="yum-utils chkconfig" + fi + + __PACKAGES="${__PACKAGES} procps" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 + +} + + +install_fedora_onedir() { + STABLE_REV=$ONEDIR_REV + #install_fedora_stable || return 1 + + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_fedora_onedir_post() { + STABLE_REV=$ONEDIR_REV + install_fedora_stable_post || return 1 + + return 0 +} # # Ended Fedora Install Functions # @@ -4085,27 +4620,13 @@ install_fedora_check_services() { # # CentOS Install Functions # -__install_epel_repository() { - if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_TRUE ]; then - return 0 - fi - - # Check if epel repo is already enabled and flag it accordingly - if yum repolist | grep -q "^[!]\\?${_EPEL_REPO}/"; then - _EPEL_REPOS_INSTALLED=$BS_TRUE - return 0 - fi - - # Download latest 'epel-release' package for the distro version directly - epel_repo_url="${HTTP_VAL}://dl.fedoraproject.org/pub/epel/epel-release-latest-${DISTRO_MAJOR_VERSION}.noarch.rpm" - rpm -Uvh --force "$epel_repo_url" || return 1 - - _EPEL_REPOS_INSTALLED=$BS_TRUE - - return 0 -} - 
__install_saltstack_rhel_repository() { + if [ "${DISTRO_MAJOR_VERSION}" -ge 9 ]; then + echoerror "Old stable repository unavailable on RH variants greater than or equal to 9" + echoerror "Use the stable install type." + exit 1 + fi + if [ "$ITYPE" = "stable" ]; then repo_rev="$STABLE_REV" else @@ -4120,7 +4641,19 @@ __install_saltstack_rhel_repository() { # Avoid using '$releasever' variable for yum. # Instead, this should work correctly on all RHEL variants. base_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${repo_rev}/" - gpg_key="SALTSTACK-GPG-KEY.pub" + if [ "${DISTRO_MAJOR_VERSION}" -eq 7 ]; then + gpg_key="SALTSTACK-GPG-KEY.pub base/RPM-GPG-KEY-CentOS-7" + elif [ "${DISTRO_MAJOR_VERSION}" -ge 9 ]; then + gpg_key="SALTSTACK-GPG-KEY2.pub" + else + gpg_key="SALTSTACK-GPG-KEY.pub" + fi + + gpg_key_urls="" + for key in $gpg_key; do + gpg_key_urls=$(printf "${base_url}${key},%s" "$gpg_key_urls") + done + repo_file="/etc/yum.repos.d/salt.repo" if [ ! -s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then @@ -4130,13 +4663,80 @@ name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever baseurl=${base_url} skip_if_unavailable=True gpgcheck=1 -gpgkey=${base_url}${gpg_key} +gpgkey=${gpg_key_urls} enabled=1 enabled_metadata=1 _eof fetch_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/" - __rpm_import_gpg "${fetch_url}${gpg_key}" || return 1 + for key in $gpg_key; do + __rpm_import_gpg "${fetch_url}${key}" || return 1 + done + + yum clean metadata || return 1 + elif [ "$repo_rev" != "latest" ]; then + echowarn "salt.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $repo_rev." 
+ fi + + return 0 +} + +__install_saltstack_rhel_onedir_repository() { + if [ "$ITYPE" = "stable" ]; then + repo_rev="$ONEDIR_REV" + else + repo_rev="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Avoid using '$releasever' variable for yum. + # Instead, this should work correctly on all RHEL variants. + base_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${ONEDIR_REV}/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + base_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/" + fi + if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ] || [ "${ONEDIR_REV}" = "nightly" ]; then + if [ "${DISTRO_MAJOR_VERSION}" -eq 9 ]; then + gpg_key="SALTSTACK-GPG-KEY2.pub" + else + gpg_key="SALTSTACK-GPG-KEY.pub" + fi + else + gpg_key="SALT-PROJECT-GPG-PUBKEY-2023.pub" + fi + + gpg_key_urls="" + for key in $gpg_key; do + gpg_key_urls=$(printf "${base_url}${key},%s" "$gpg_key_urls") + done + + repo_file="/etc/yum.repos.d/salt.repo" + + if [ ! 
-s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + cat <<_eof > "$repo_file" +[saltstack] +name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever +baseurl=${base_url} +skip_if_unavailable=True +gpgcheck=1 +gpgkey=${gpg_key_urls} +enabled=1 +enabled_metadata=1 +_eof + + fetch_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + fetch_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" + fi + for key in $gpg_key; do + __rpm_import_gpg "${fetch_url}${key}" || return 1 + done + yum clean metadata || return 1 elif [ "$repo_rev" != "latest" ]; then echowarn "salt.repo already exists, ignoring salt version argument." @@ -4158,7 +4758,6 @@ install_centos_stable_deps() { fi if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then - __install_epel_repository || return 1 __install_saltstack_rhel_repository || return 1 fi @@ -4179,27 +4778,29 @@ install_centos_stable_deps() { if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python3-pyyaml" + __PACKAGES="${__PACKAGES} python3-pyyaml python3-setuptools" else __PACKAGES="${__PACKAGES} python2-pyyaml" fi elif [ "$DISTRO_MAJOR_VERSION" -eq 7 ]; then # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python36-PyYAML" + __PACKAGES="${__PACKAGES} python36-PyYAML python36-setuptools" else __PACKAGES="${__PACKAGES} PyYAML" fi else # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python34-PyYAML" + __PACKAGES="${__PACKAGES} python34-PyYAML python34-setuptools" else 
__PACKAGES="${__PACKAGES} PyYAML" fi fi fi + __PACKAGES="${__PACKAGES} procps" + # shellcheck disable=SC2086 __yum_install_noinput ${__PACKAGES} || return 1 @@ -4216,40 +4817,29 @@ install_centos_stable_deps() { install_centos_stable() { __PACKAGES="" - local cloud='salt-cloud' - local master='salt-master' - local minion='salt-minion' - local syndic='salt-syndic' - - if echo "$STABLE_REV" | grep -q "archive";then # point release being applied - local ver=$(echo "$STABLE_REV"|awk -F/ '{print $2}') # strip archive/ - elif echo "$STABLE_REV" | egrep -vq "archive|latest";then # latest or major version(3003, 3004, etc) being applie - local ver=$STABLE_REV - fi - - if [ ! -z $ver ]; then - cloud+="-$ver" - master+="-$ver" - minion+="-$ver" - syndic+="-$ver" - fi - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $cloud" + __PACKAGES="${__PACKAGES} salt-cloud" fi if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $master" + __PACKAGES="${__PACKAGES} salt-master" fi if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} $minion" + __PACKAGES="${__PACKAGES} salt-minion" fi if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $syndic" + __PACKAGES="${__PACKAGES} salt-syndic" fi # shellcheck disable=SC2086 __yum_install_noinput ${__PACKAGES} || return 1 + # Workaround for 3.11 broken on CentOS Stream 8.x + # Re-install Python 3.6 + _py_version=$(${_PY_EXE} -c "import sys; print('{0}.{1}'.format(*sys.version_info))") + if [ "$DISTRO_MAJOR_VERSION" -eq 8 ] && [ "${_py_version}" = "3.11" ]; then + __yum_install_noinput python3 + fi + return 0 } @@ -4285,7 +4875,14 @@ install_centos_stable_post() { } install_centos_git_deps() { - install_centos_stable_deps || return 1 + # First try stable deps then fall back to onedir deps if that one fails + # if we're installing on a Red Hat based host that doesn't have the classic + # package repos available. 
+ # Set ONEDIR_REV to STABLE_REV in case we + # end up calling install_centos_onedir_deps + ONEDIR_REV=${STABLE_REV} + install_centos_onedir_deps || \ + return 1 if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then __yum_install_noinput ca-certificates || return 1 @@ -4445,10 +5042,16 @@ install_centos_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_FILE="${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" + else + _SERVICE_FILE="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" + fi if [ -f /bin/systemctl ]; then if [ ! -f "/usr/lib/systemd/system/salt-${fname}.service" ] || \ { [ -f "/usr/lib/systemd/system/salt-${fname}.service" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system + __copyfile "${_SERVICE_FILE}" /usr/lib/systemd/system fi SYSTEMD_RELOAD=$BS_TRUE @@ -4468,6 +5071,102 @@ install_centos_git_post() { return 0 } +install_centos_onedir_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." 
+ fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_saltstack_rhel_onedir_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. This call has + # its own check in case -r was passed without -R. + if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_rhel_onedir_repository || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="dnf-utils chkconfig" + else + __PACKAGES="yum-utils chkconfig" + fi + + __PACKAGES="${__PACKAGES} procps" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + + return 0 +} + +install_centos_onedir() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_centos_onedir_post() { + SYSTEMD_RELOAD=$BS_FALSE + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + /bin/systemctl 
is-enabled salt-${fname}.service > /dev/null 2>&1 || ( + /bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && + /bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 + ) + + SYSTEMD_RELOAD=$BS_TRUE + elif [ -f "/etc/init.d/salt-${fname}" ]; then + /sbin/chkconfig salt-${fname} on + fi + done + + if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then + /bin/systemctl daemon-reload + fi + + return 0 +} + install_centos_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -4567,6 +5266,11 @@ install_red_hat_linux_git_deps() { return 0 } +install_red_hat_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + install_red_hat_enterprise_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -4577,6 +5281,11 @@ install_red_hat_enterprise_git_deps() { return 0 } +install_red_hat_enterprise_onedir_deps() { + install_red_hat_linux_onedir_deps || return 1 + return 0 +} + install_red_hat_enterprise_linux_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -4587,6 +5296,11 @@ install_red_hat_enterprise_linux_git_deps() { return 0 } +install_red_hat_enterprise_linux_onedir_deps() { + install_red_hat_linux_onedir_deps || return 1 + return 0 +} + install_red_hat_enterprise_server_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -4597,6 +5311,11 @@ install_red_hat_enterprise_server_git_deps() { return 0 } +install_red_hat_enterprise_server_onedir_deps() { + install_red_hat_linux_onedir_deps || return 1 + return 0 +} + install_red_hat_enterprise_workstation_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -4607,6 +5326,11 @@ install_red_hat_enterprise_workstation_git_deps() { return 0 } +install_red_hat_enterprise_workstation_onedir_deps() { + install_red_hat_linux_timat_deps || return 1 + return 0 +} + install_red_hat_linux_stable() { install_centos_stable || return 1 return 0 @@ -4617,6 +5341,11 @@ install_red_hat_linux_git() { return 0 } 
+install_red_hat_linux_onedir() { + install_centos_onedir || return 1 + return 0 +} + install_red_hat_enterprise_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -4627,6 +5356,11 @@ install_red_hat_enterprise_git() { return 0 } +install_red_hat_enterprise_onedir() { + install_red_hat_linux_onedir || return 1 + return 0 +} + install_red_hat_enterprise_linux_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -4637,6 +5371,11 @@ install_red_hat_enterprise_linux_git() { return 0 } +install_red_hat_enterprise_linux_onedir() { + install_red_hat_linux_onedir || return 1 + return 0 +} + install_red_hat_enterprise_server_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -4647,6 +5386,11 @@ install_red_hat_enterprise_server_git() { return 0 } +install_red_hat_enterprise_server_onedir() { + install_red_hat_linux_onedir || return 1 + return 0 +} + install_red_hat_enterprise_workstation_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -4657,6 +5401,11 @@ install_red_hat_enterprise_workstation_git() { return 0 } +install_red_hat_enterprise_workstation_onedir() { + install_red_hat_linux_onedir || return 1 + return 0 +} + install_red_hat_linux_stable_post() { install_centos_stable_post || return 1 return 0 @@ -4801,6 +5550,15 @@ install_red_hat_enterprise_workstation_testing_post() { # Oracle Linux Install Functions # install_oracle_linux_stable_deps() { + # Install Oracle's EPEL. + if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_FALSE ]; then + _EPEL_REPO=oracle-epel-release-el${DISTRO_MAJOR_VERSION} + if ! 
rpm -q "${_EPEL_REPO}" > /dev/null; then + __yum_install_noinput "${_EPEL_REPO}" + fi + _EPEL_REPOS_INSTALLED=$BS_TRUE + fi + install_centos_stable_deps || return 1 return 0 } @@ -4810,6 +5568,11 @@ install_oracle_linux_git_deps() { return 0 } +install_oracle_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + install_oracle_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -4825,6 +5588,11 @@ install_oracle_linux_git() { return 0 } +install_oracle_linux_onedir() { + install_centos_onedir || return 1 + return 0 +} + install_oracle_linux_testing() { install_centos_testing || return 1 return 0 @@ -4840,6 +5608,11 @@ install_oracle_linux_git_post() { return 0 } +install_oracle_linux_onedir_post() { + install_centos_onedir_post || return 1 + return 0 +} + install_oracle_linux_testing_post() { install_centos_testing_post || return 1 return 0 @@ -4859,6 +5632,162 @@ install_oracle_linux_check_services() { # ####################################################################################################################### +####################################################################################################################### +# +# AlmaLinux Install Functions +# +install_almalinux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_almalinux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_almalinux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + +install_almalinux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_almalinux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_almalinux_git() { + install_centos_git || return 1 + return 0 +} + +install_almalinux_onedir() { + install_centos_onedir || return 1 + return 0 +} + +install_almalinux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_almalinux_stable_post() { + install_centos_stable_post || 
return 1 + return 0 +} + +install_almalinux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_almalinux_onedir_post() { + install_centos_onedir_post || return 1 + return 0 +} + +install_almalinux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_almalinux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_almalinux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended AlmaLinux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Rocky Linux Install Functions +# +install_rocky_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_rocky_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_rocky_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + +install_rocky_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_rocky_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_rocky_linux_onedir() { + install_centos_onedir || return 1 + return 0 +} + +install_rocky_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_rocky_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_rocky_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_rocky_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_rocky_linux_onedir_post() { + install_centos_onedir_post || return 1 + return 0 +} + +install_rocky_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_rocky_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + 
+install_rocky_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended Rocky Linux Install Functions +# +####################################################################################################################### + ####################################################################################################################### # # Scientific Linux Install Functions @@ -4873,6 +5802,11 @@ install_scientific_linux_git_deps() { return 0 } +install_scientific_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + install_scientific_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -4888,6 +5822,11 @@ install_scientific_linux_git() { return 0 } +install_scientific_linux_onedir() { + install_centos_onedir || return 1 + return 0 +} + install_scientific_linux_testing() { install_centos_testing || return 1 return 0 @@ -4903,6 +5842,11 @@ install_scientific_linux_git_post() { return 0 } +install_scientific_linux_onedir_post() { + install_centos_onedir_post || return 1 + return 0 +} + install_scientific_linux_testing_post() { install_centos_testing_post || return 1 return 0 @@ -4936,6 +5880,11 @@ install_cloud_linux_git_deps() { return 0 } +install_cloud_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + install_cloud_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -5029,8 +5978,8 @@ install_alpine_linux_git_deps() { fi fi else - apk -U add python2 py2-pip py2-setuptools || return 1 - _PY_EXE=python2 + apk -U add python3 python3-dev py3-pip py3-setuptools g++ linux-headers zeromq-dev openrc || return 1 + _PY_EXE=python3 return 0 fi @@ -5500,6 +6449,100 @@ _eof fi } +install_amazon_linux_ami_2_onedir_deps() { + # Shim to figure out if we're using old (rhel) or new (aws) rpms. 
+ _USEAWS=$BS_FALSE + pkg_append="python" + + if [ "$ITYPE" = "onedir" ]; then + repo_rev="$ONEDIR_REV" + else + repo_rev="latest" + fi + + if echo $repo_rev | grep -E -q '^archive'; then + year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) + else + year=$(echo "$repo_rev" | cut -c1-4) + fi + + # We need to install yum-utils before doing anything else when installing on + # Amazon Linux ECS-optimized images. See issue #974. + __yum_install_noinput yum-utils + + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __REPO_FILENAME="salt.repo" + __PY_VERSION_REPO="yum" + PY_PKG_VER="" + repo_label="saltstack-repo" + repo_name="SaltStack repo for Amazon Linux 2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __REPO_FILENAME="salt.repo" + __PY_VERSION_REPO="py3" + PY_PKG_VER=3 + repo_label="saltstack-py3-repo" + repo_name="SaltStack Python 3 repo for Amazon Linux 2" + fi + + base_url="$HTTP_VAL://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/amazon/2/\$basearch/$repo_rev/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + base_url="$HTTP_VAL://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/amazon/2/\$basearch/" + fi + + if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ] || [ "${ONEDIR_REV}" = "nightly" ]; then + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub,${base_url}base/RPM-GPG-KEY-CentOS-7" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + fi + else + gpg_key="${base_url}SALT-PROJECT-GPG-PUBKEY-2023.pub" + fi + + # This should prob be refactored to use __install_saltstack_rhel_repository() + # With args passed in to do the right thing. Reformatted to be more like the + # amazon linux yum file. + if [ ! 
-s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then + cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" +[$repo_label] +name=$repo_name +failovermethod=priority +priority=10 +gpgcheck=1 +gpgkey=$gpg_key +baseurl=$base_url +_eof + fi + + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed + if [ -n "${PY_PKG_VER}" ] && [ "${PY_PKG_VER}" -eq 3 ]; then + __PACKAGES="${pkg_append}${PY_PKG_VER}-m2crypto ${pkg_append}${PY_PKG_VER}-pyyaml" + else + __PACKAGES="m2crypto PyYAML ${pkg_append}-futures" + fi + + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-crypto ${pkg_append}${PY_PKG_VER}-jinja2 procps-ng" + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-msgpack ${pkg_append}${PY_PKG_VER}-requests ${pkg_append}${PY_PKG_VER}-zmq" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi +} + install_amazon_linux_ami_stable() { install_centos_stable || return 1 return 0 @@ -5575,6 +6618,16 @@ install_amazon_linux_ami_2_check_services() { return 0 } +install_amazon_linux_ami_2_onedir() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_2_onedir_post() { + install_centos_stable_post || return 1 + return 0 +} + # # Ended Amazon Linux AMI Install Functions # @@ -5666,6 +6719,10 @@ install_arch_linux_git_deps() { return 0 } +install_arch_linux_onedir_deps() { + install_arch_linux_stable_deps || return 1 +} + install_arch_linux_stable() { # Pacman does not resolve dependencies on outdated versions # They always need to be updated @@ -5684,6 +6741,8 @@ install_arch_linux_stable() { install_arch_linux_git() { + _POST_NEON_PIP_INSTALL_ARGS="${_POST_NEON_PIP_INSTALL_ARGS} 
--use-pep517" + _PIP_DOWNLOAD_ARGS="${_PIP_DOWNLOAD_ARGS} --use-pep517" if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 return 0 @@ -5741,8 +6800,15 @@ install_arch_linux_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" + fi + if [ -f /usr/bin/systemctl ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -5809,11 +6875,342 @@ install_arch_check_services() { return 0 } + +install_arch_linux_onedir() { + install_arch_linux_stable || return 1 + + return 0 +} + +install_arch_linux_onedir_post() { + install_arch_linux_post || return 1 + + return 0 +} # # Ended Arch Install Functions # ####################################################################################################################### +####################################################################################################################### +# +# Photon OS Install Functions +# + +__install_saltstack_photon_onedir_repository() { + if [ "$ITYPE" = "stable" ]; then + REPO_REV="$ONEDIR_REV" + else + REPO_REV="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + REPO_FILE="/etc/yum.repos.d/salt.repo" + + if [ ! 
-s "$REPO_FILE" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/photon/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/photon/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" + fi + + __fetch_url "${REPO_FILE}" "${FETCH_URL}.repo" + + GPG_KEY="SALT-PROJECT-GPG-PUBKEY-2023.pub" + + __rpm_import_gpg "${FETCH_URL}/${GPG_KEY}" || return 1 + + tdnf makecache || return 1 + elif [ "$REPO_REV" != "latest" ]; then + echowarn "salt.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $REPO_REV." + fi + + return 0 +} + +install_photon_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + tdnf -y update || return 1 + fi + + __PACKAGES="${__PACKAGES:=}" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -lt 3 ]; then + echoerror "There are no Python 2 stable packages for Fedora, only Py3 packages" + return 1 + fi + + PY_PKG_VER=3 + + __PACKAGES="${__PACKAGES} libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip python${PY_PKG_VER}-m2crypto python${PY_PKG_VER}-pyyaml" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd" + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + fi + + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1 + + return 0 +} + +install_photon_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq 
$BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_photon_git_deps() { + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + # Packages are named python3- + PY_PKG_VER=3 + else + PY_PKG_VER=2 + fi + + __PACKAGES="" + if ! __check_command_exists ps; then + __PACKAGES="${__PACKAGES} procps-ng" + fi + if ! __check_command_exists git; then + __PACKAGES="${__PACKAGES} git" + fi + + if [ -n "${__PACKAGES}" ]; then + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} || return 1 + __PACKAGES="" + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __PACKAGES="${__PACKAGES} ca-certificates" + fi + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr" + fi + + install_photon_deps || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + if __check_command_exists python3; then + __python="python3" + fi + elif [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + if __check_command_exists python2; then + __python="python2" + fi + else + if ! __check_command_exists python; then + echoerror "Unable to find a python binary?!" 
+ return 1 + fi + # Let's hope it's the right one + __python="python" + fi + + grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS=' + ' read -r dep; do + echodebug "Running '${__python}' -m pip install '${dep}'" + "${__python}" -m pip install "${dep}" || return 1 + done + else + __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} || return 1 + fi + + # Need newer version of setuptools on Photon + _setuptools_dep="setuptools>=${_MINIMUM_SETUPTOOLS_VERSION}" + echodebug "Running '${_PY_EXE} -m pip --upgrade install ${_setuptools_dep}'" + ${_PY_EXE} -m pip install --upgrade "${_setuptools_dep}" + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_photon_git() { + if [ "${_PY_EXE}" != "" ]; then + _PYEXE=${_PY_EXE} + echoinfo "Using the following python version: ${_PY_EXE} to install salt" + else + _PYEXE='python2' + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + else + ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + fi + return 0 +} + +install_photon_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! 
__check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" + fi + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Salt executables are located under `/usr/local/bin/` on Fedora 36+ + #if [ "${DISTRO_VERSION}" -ge 36 ]; then + # sed -i -e 's:/usr/bin/:/usr/local/bin/:g' /lib/systemd/system/salt-*.service + #fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_photon_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + done +} + +install_photon_check_services() { + for fname in api master minion 
syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} + +install_photon_onedir_deps() { + + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + tdnf -y update || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_saltstack_photon_onedir_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. This call has + # its own check in case -r was passed without -R. 
+ if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_photon_onedir_repository || return 1 + fi + + __PACKAGES="procps-ng" + + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __tdnf_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 + +} + + +install_photon_onedir() { + STABLE_REV=$ONEDIR_REV + + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_photon_onedir_post() { + STABLE_REV=$ONEDIR_REV + install_photon_stable_post || return 1 + + return 0 +} +# +# Ended Fedora Install Functions +# +####################################################################################################################### + ####################################################################################################################### # # FreeBSD Install Functions @@ -5841,15 +7238,15 @@ install_freebsd_git_deps() { if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py38-salt) + SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py39-salt) # shellcheck disable=SC2086 /usr/local/sbin/pkg install -y ${SALT_DEPENDENCIES} python || return 1 - /usr/local/sbin/pkg install -y py38-requests || return 1 - /usr/local/sbin/pkg install -y py38-tornado4 || return 1 + /usr/local/sbin/pkg install -y py39-requests || return 1 + /usr/local/sbin/pkg install -y py39-tornado4 || return 1 
else - /usr/local/sbin/pkg install -y python py38-pip py38-setuptools libzmq4 libunwind || return 1 + /usr/local/sbin/pkg install -y python py39-pip py39-setuptools libzmq4 libunwind || return 1 fi echodebug "Adapting paths to FreeBSD" @@ -5895,7 +7292,7 @@ install_freebsd_stable() { # installing latest version of salt from FreeBSD CURRENT ports repo # # shellcheck disable=SC2086 - /usr/local/sbin/pkg install -y py38-salt || return 1 + /usr/local/sbin/pkg install -y py39-salt || return 1 return 0 } @@ -5987,6 +7384,15 @@ install_freebsd_restart_daemons() { service salt_$fname start done } + +install_freebsd_onedir() { +# +# call install_freebsd_stable +# + install_freebsd_stable || return 1 + + return 0 +} # # Ended FreeBSD Install Functions # @@ -6021,7 +7427,7 @@ install_openbsd_git_deps() { __git_clone_and_checkout || return 1 if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - pkg_add -I -v py-pip py-setuptools + pkg_add -I -v py3-pip py3-setuptools fi # @@ -6105,6 +7511,14 @@ install_openbsd_restart_daemons() { return 0 } +install_openbsd_onedir() { +# +# Call install_openbsd_stable +# + install_openbsd_stable || return 1 + + return 0 +} # # Ended OpenBSD Install Functions # @@ -6305,6 +7719,14 @@ install_smartos_restart_daemons() { return 0 } +install_smartos_onedir() { +# +# call install_smartos_stable +# + install_smartos_stable || return 1 + + return 0 +} # # Ended SmartOS Install Functions # @@ -6321,19 +7743,16 @@ __set_suse_pkg_repo() { # Set distro repo variable if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then DISTRO_REPO="openSUSE_Tumbleweed" + elif [ "${DISTRO_MAJOR_VERSION}" -eq 15 ] && [ "${DISTRO_MINOR_VERSION}" -ge 4 ]; then + DISTRO_REPO="${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ] || [ "${DISTRO_MAJOR_VERSION}" -eq 15 ]; then DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" else DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}_SP${SUSE_PATCHLEVEL}" fi - if [ 
"$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then - suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" - suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" - else - suse_pkg_url_base="${HTTP_VAL}://repo.saltproject.io/opensuse" - suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack:products.repo" - fi + suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path" } @@ -6353,7 +7772,7 @@ __version_lte() { zypper --non-interactive install --auto-agree-with-licenses python || return 1 fi - if [ "$(python -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print V1<=V2' "$1" "$2")" = "True" ]; then + if [ "$(${_PY_EXE} -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print(V1<=V2)' "$1" "$2")" = "True" ]; then __ZYPPER_REQUIRES_REPLACE_FILES=${BS_TRUE} else __ZYPPER_REQUIRES_REPLACE_FILES=${BS_FALSE} @@ -6470,7 +7889,7 @@ install_opensuse_git_deps() { fi # Check for Tumbleweed elif [ "${DISTRO_MAJOR_VERSION}" -ge 20210101 ]; then - __PACKAGES="python3-pip" + __PACKAGES="python3-pip gcc-c++ python3-pyzmq-devel" else __PACKAGES="python-pip python-setuptools gcc" fi @@ -6487,6 +7906,10 @@ install_opensuse_git_deps() { return 0 } +install_opensuse_onedir_deps() { + install_opensuse_stable_deps || return 1 +} + install_opensuse_stable() { __PACKAGES="" @@ -6519,6 +7942,10 @@ install_opensuse_git() { return 0 } +install_opensuse_onedir() { + install_opensuse_stable || return 1 +} + install_opensuse_stable_post() { for fname in api master minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -6563,10 +7990,17 @@ install_opensuse_git_post() { use_usr_lib=$BS_TRUE fi - if [ 
"${use_usr_lib}" -eq $BS_TRUE ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" else - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/" + fi + + if [ "${use_usr_lib}" -eq $BS_TRUE ]; then + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" + else + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" fi continue @@ -6581,6 +8015,10 @@ install_opensuse_git_post() { return 0 } +install_opensuse_onedir_post() { + install_opensuse_stable_post || return 1 +} + install_opensuse_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -6740,6 +8178,11 @@ install_opensuse_15_git() { return 0 } +install_opensuse_15_onedir_deps() { + __opensuse_prep_install || return 1 + return 0 +} + # # End of openSUSE Leap 15 # @@ -6769,6 +8212,13 @@ install_suse_15_git_deps() { return 0 } +install_suse_15_onedir_deps() { + __opensuse_prep_install || return 1 + install_opensuse_15_onedir_deps || return 1 + + return 0 +} + install_suse_15_stable() { install_opensuse_stable || return 1 return 0 @@ -6779,6 +8229,11 @@ install_suse_15_git() { return 0 } +install_suse_15_onedir() { + install_opensuse_stable || return 1 + return 0 +} + install_suse_15_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -6789,6 +8244,11 @@ install_suse_15_git_post() { return 0 } +install_suse_15_onedir_post() { + install_opensuse_stable_post || return 1 + return 0 +} + install_suse_15_restart_daemons() { install_opensuse_restart_daemons || return 1 return 0 @@ -6871,6 +8331,11 @@ install_suse_12_git_deps() { return 0 } 
+install_suse_12_onedir_deps() { + install_suse_12_stable_deps || return 1 + return 0 +} + install_suse_12_stable() { install_opensuse_stable || return 1 return 0 @@ -6881,6 +8346,11 @@ install_suse_12_git() { return 0 } +install_suse_12_onedir() { + install_opensuse_stable || return 1 + return 0 +} + install_suse_12_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -6891,6 +8361,11 @@ install_suse_12_git_post() { return 0 } +install_suse_12_onedir_post() { + install_opensuse_stable_post || return 1 + return 0 +} + install_suse_12_restart_daemons() { install_opensuse_restart_daemons || return 1 return 0 @@ -6967,6 +8442,11 @@ install_suse_11_git_deps() { return 0 } +install_suse_11_onedir_deps() { + install_suse_11_stable_deps || return 1 + return 0 +} + install_suse_11_stable() { install_opensuse_stable || return 1 return 0 @@ -6977,6 +8457,11 @@ install_suse_11_git() { return 0 } +install_suse_11_onedir() { + install_opensuse_stable || return 1 + return 0 +} + install_suse_11_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -6987,6 +8472,11 @@ install_suse_11_git_post() { return 0 } +install_suse_11_onedir_post() { + install_opensuse_stable_post || return 1 + return 0 +} + install_suse_11_restart_daemons() { install_opensuse_restart_daemons || return 1 return 0 @@ -7086,11 +8576,6 @@ __gentoo_pre_dep() { mkdir /etc/portage fi - # Enable Python 3.6 target for pre Neon Salt release - if echo "${STABLE_REV}" | grep -q "2019" || [ "${ITYPE}" = "git" ] && [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - EXTRA_PYTHON_TARGET=python3_6 - fi - # Enable Python 3.7 target for Salt Neon using GIT if [ "${ITYPE}" = "git" ] && [ "${GIT_REV}" = "v3000" ]; then EXTRA_PYTHON_TARGET=python3_7 @@ -7186,6 +8671,9 @@ install_gentoo_git_deps() { __emerge ${GENTOO_GIT_PACKAGES} || return 1 fi + echoinfo "Running emerge -v1 setuptools" + __emerge -v1 setuptools || return 1 + __git_clone_and_checkout || return 1 __gentoo_post_dep || return 1 } @@ 
-7233,6 +8721,11 @@ install_gentoo_git() { return 0 } +install_gentoo_onedir() { + STABLE_REV=${ONEDIR_REV} + install_gentoo_stable || return 1 +} + install_gentoo_post() { for fname in api master minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -7268,8 +8761,15 @@ install_gentoo_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" + fi + if __check_command_exists systemctl ; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -7315,6 +8815,10 @@ _eof return 0 } +install_gentoo_onedir_post() { + install_gentoo_post || return 1 +} + install_gentoo_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -7466,7 +8970,46 @@ __macosx_get_packagesite() { fi PKG="salt-${STABLE_REV}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" - SALTPKGCONFURL="https://repo.saltproject.io/osx/${PKG}" + SALTPKGCONFURL="https://${_REPO_URL}/osx/${PKG}" +} + +__parse_repo_json_python() { + + # Using latest, grab the right + # version from the repo.json + _JSON_VERSION=$(python - <<-EOF +import json, urllib.request +url = "https://repo.saltproject.io/salt/py3/macos/repo.json" +response = urllib.request.urlopen(url) +data = json.loads(response.read()) +version = data["${_ONEDIR_REV}"][list(data["${_ONEDIR_REV}"])[0]]['version'] +print(version) +EOF +) +echo "${_JSON_VERSION}" +} + 
+__macosx_get_packagesite_onedir() { + DARWIN_ARCH="x86_64" + + __PY_VERSION_REPO="py2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + if [ "$(echo "$_ONEDIR_REV" | grep -E '^(latest)$')" != "" ]; then + _PKG_VERSION=$(__parse_repo_json_python) + elif [ "$(echo "$_ONEDIR_REV" | grep -E '^([3-9][0-9]{3}(\.[0-9]*))')" != "" ]; then + _PKG_VERSION=$_ONEDIR_REV + else + _PKG_VERSION=$(__parse_repo_json_python) + fi + if [ "$(echo "$_ONEDIR_REV" | grep -E '^(3005)')" != "" ]; then + PKG="salt-${_PKG_VERSION}-macos-${DARWIN_ARCH}.pkg" + else + PKG="salt-${_PKG_VERSION}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" + fi + SALTPKGCONFURL="https://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/macos/${ONEDIR_REV}/${PKG}" } # Using a separate conf step to head for idempotent install... @@ -7475,11 +9018,21 @@ __configure_macosx_pkg_details() { return 0 } +__configure_macosx_pkg_details_onedir() { + __macosx_get_packagesite_onedir || return 1 + return 0 +} + install_macosx_stable_deps() { __configure_macosx_pkg_details || return 1 return 0 } +install_macosx_onedir_deps() { + __configure_macosx_pkg_details_onedir || return 1 + return 0 +} + install_macosx_git_deps() { install_macosx_stable_deps || return 1 @@ -7526,6 +9079,16 @@ install_macosx_stable() { return 0 } +install_macosx_onedir() { + install_macosx_onedir_deps || return 1 + + __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1 + + /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1 + + return 0 +} + install_macosx_git() { if [ -n "$_PY_EXE" ]; then @@ -7563,6 +9126,11 @@ install_macosx_stable_post() { return 0 } +install_macosx_onedir_post() { + install_macosx_stable_post || return 1 + return 0 +} + install_macosx_git_post() { install_macosx_stable_post || return 1 return 0 @@ -7571,8 +9139,15 @@ install_macosx_git_post() { install_macosx_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return - /bin/launchctl unload -w 
/Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 - /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + fi + + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist || return 1 + /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.master.plist || return 1 + fi return 0 } @@ -7774,6 +9349,43 @@ preseed_master() { # ####################################################################################################################### +####################################################################################################################### +# +# This function checks if all of the installed daemons are running or not. 
+# +daemons_running_onedir() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + FAILED_DAEMONS=0 + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f "/opt/saltstack/salt/run/run" ]; then + salt_path="/opt/saltstack/salt/run/run ${fname}" + else + salt_path="salt-${fname}" + fi + process_running=$(pgrep -f "${salt_path}") + if [ "${process_running}" = "" ]; then + echoerror "${salt_path} was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} + +# +# Ended daemons running check function +# +####################################################################################################################### + ####################################################################################################################### # # This function checks if all of the installed daemons are running or not. 
@@ -7874,6 +9486,7 @@ echodebug "PRESEED_MASTER_FUNC=${PRESEED_MASTER_FUNC}" INSTALL_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}" +echodebug "INSTALL_FUNC_NAMES=${INSTALL_FUNC_NAMES}" INSTALL_FUNC="null" for FUNC_NAME in $(__strip_duplicates "$INSTALL_FUNC_NAMES"); do @@ -7925,6 +9538,7 @@ DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}_${ITYPE}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${ITYPE}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running" DAEMONS_RUNNING_FUNC="null" @@ -8114,6 +9728,11 @@ if [ "$DAEMONS_RUNNING_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; fi fi +if [ "$_AUTO_ACCEPT_MINION_KEYS" -eq "$BS_TRUE" ]; then + echoinfo "Accepting the Salt Minion Keys" + salt-key -yA +fi + # Done! if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then echoinfo "Salt installed!" @@ -8121,6 +9740,13 @@ else echoinfo "Salt configured!" fi +if [ "$_QUICK_START" -eq "$BS_TRUE" ]; then + echoinfo "Congratulations!" 
+ echoinfo "A couple of commands to try:" + echoinfo " salt \* test.ping" + echoinfo " salt \* test.version" +fi + exit 0 # vim: set sts=4 ts=4 et diff --git a/salt/salt/scripts/bootstrap-salt_orig.sh b/salt/salt/scripts/bootstrap-salt_orig.sh new file mode 100644 index 000000000..47d25949c --- /dev/null +++ b/salt/salt/scripts/bootstrap-salt_orig.sh @@ -0,0 +1,8126 @@ +#!/bin/sh - + +# WARNING: Changes to this file in the salt repo will be overwritten! +# Please submit pull requests against the salt-bootstrap repo: +# https://github.com/saltstack/salt-bootstrap + +#====================================================================================================================== +# vim: softtabstop=4 shiftwidth=4 expandtab fenc=utf-8 spell spelllang=en cc=120 +#====================================================================================================================== +# +# FILE: bootstrap-salt.sh +# +# DESCRIPTION: Bootstrap Salt installation for various systems/distributions +# +# BUGS: https://github.com/saltstack/salt-bootstrap/issues +# +# COPYRIGHT: (c) 2012-2021 by the SaltStack Team, see AUTHORS.rst for more +# details. +# +# LICENSE: Apache 2.0 +# ORGANIZATION: SaltStack (saltproject.io) +# CREATED: 10/15/2012 09:49:37 PM WEST +#====================================================================================================================== +set -o nounset # Treat unset variables as an error + +__ScriptVersion="2021.09.17" +__ScriptName="bootstrap-salt.sh" + +__ScriptFullName="$0" +__ScriptArgs="$*" + +#====================================================================================================================== +# Environment variables taken into account. 
+#---------------------------------------------------------------------------------------------------------------------- +# * BS_COLORS: If 0 disables colour support +# * BS_PIP_ALLOWED: If 1 enable pip based installations(if needed) +# * BS_PIP_ALL: If 1 enable all python packages to be installed via pip instead of apt, requires setting virtualenv +# * BS_VIRTUALENV_DIR: The virtualenv to install salt into (shouldn't exist yet) +# * BS_ECHO_DEBUG: If 1 enable debug echo which can also be set by -D +# * BS_SALT_ETC_DIR: Defaults to /etc/salt (Only tweak'able on git based installations) +# * BS_SALT_CACHE_DIR: Defaults to /var/cache/salt (Only tweak'able on git based installations) +# * BS_KEEP_TEMP_FILES: If 1, don't move temporary files, instead copy them +# * BS_FORCE_OVERWRITE: Force overriding copied files(config, init.d, etc) +# * BS_UPGRADE_SYS: If 1 and an option, upgrade system. Default 0. +# * BS_GENTOO_USE_BINHOST: If 1 add `--getbinpkg` to gentoo's emerge +# * BS_SALT_MASTER_ADDRESS: The IP or DNS name of the salt-master the minion should connect to +# * BS_SALT_GIT_CHECKOUT_DIR: The directory where to clone Salt on git installations +#====================================================================================================================== + + +# Bootstrap script truth values +BS_TRUE=1 +BS_FALSE=0 + +# Default sleep time used when waiting for daemons to start, restart and checking for these running +__DEFAULT_SLEEP=3 + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __detect_color_support +# DESCRIPTION: Try to detect color support. +#---------------------------------------------------------------------------------------------------------------------- +_COLORS=${BS_COLORS:-$(tput colors 2>/dev/null || echo 0)} +__detect_color_support() { + # shellcheck disable=SC2181 + if [ $? 
-eq 0 ] && [ "$_COLORS" -gt 2 ]; then + RC='\033[1;31m' + GC='\033[1;32m' + BC='\033[1;34m' + YC='\033[1;33m' + EC='\033[0m' + else + RC="" + GC="" + BC="" + YC="" + EC="" + fi +} +__detect_color_support + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echoerr +# DESCRIPTION: Echo errors to stderr. +#---------------------------------------------------------------------------------------------------------------------- +echoerror() { + printf "${RC} * ERROR${EC}: %s\\n" "$@" 1>&2; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echoinfo +# DESCRIPTION: Echo information to stdout. +#---------------------------------------------------------------------------------------------------------------------- +echoinfo() { + printf "${GC} * INFO${EC}: %s\\n" "$@"; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echowarn +# DESCRIPTION: Echo warning information to stdout. +#---------------------------------------------------------------------------------------------------------------------- +echowarn() { + printf "${YC} * WARN${EC}: %s\\n" "$@"; +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: echodebug +# DESCRIPTION: Echo debug information to stdout. +#---------------------------------------------------------------------------------------------------------------------- +echodebug() { + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + printf "${BC} * DEBUG${EC}: %s\\n" "$@"; + fi +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_command_exists +# DESCRIPTION: Check if a command exists. 
+#---------------------------------------------------------------------------------------------------------------------- +__check_command_exists() { + command -v "$1" > /dev/null 2>&1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_pip_allowed +# DESCRIPTION: Simple function to let the users know that -P needs to be used. +#---------------------------------------------------------------------------------------------------------------------- +__check_pip_allowed() { + if [ $# -eq 1 ]; then + _PIP_ALLOWED_ERROR_MSG=$1 + else + _PIP_ALLOWED_ERROR_MSG="pip based installations were not allowed. Retry using '-P'" + fi + + if [ "$_PIP_ALLOWED" -eq $BS_FALSE ]; then + echoerror "$_PIP_ALLOWED_ERROR_MSG" + __usage + exit 1 + fi +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_config_dir +# DESCRIPTION: Checks the config directory, retrieves URLs if provided. +#---------------------------------------------------------------------------------------------------------------------- +__check_config_dir() { + CC_DIR_NAME="$1" + CC_DIR_BASE=$(basename "${CC_DIR_NAME}") + + case "$CC_DIR_NAME" in + http://*|https://*) + __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + ftp://*) + __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *://*) + echoerror "Unsupported URI scheme for $CC_DIR_NAME" + echo "null" + return + ;; + *) + if [ ! -e "${CC_DIR_NAME}" ]; then + echoerror "The configuration directory or archive $CC_DIR_NAME does not exist." 
+ echo "null" + return + fi + ;; + esac + + case "$CC_DIR_NAME" in + *.tgz|*.tar.gz) + tar -zxf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tgz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.gz") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *.tbz|*.tar.bz2) + tar -xjf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tbz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.bz2") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + *.txz|*.tar.xz) + tar -xJf "${CC_DIR_NAME}" -C /tmp + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".txz") + CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.xz") + CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + ;; + esac + + echo "${CC_DIR_NAME}" +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_unparsed_options +# DESCRIPTION: Checks the placed after the install arguments +#---------------------------------------------------------------------------------------------------------------------- +__check_unparsed_options() { + shellopts="$1" + # grep alternative for SunOS + if [ -f /usr/xpg4/bin/grep ]; then + grep='/usr/xpg4/bin/grep' + else + grep='grep' + fi + unparsed_options=$( echo "$shellopts" | ${grep} -E '(^|[[:space:]])[-]+[[:alnum:]]' ) + if [ "$unparsed_options" != "" ]; then + __usage + echo + echoerror "options are only allowed before install arguments" + echo + exit 1 + fi +} + + +#---------------------------------------------------------------------------------------------------------------------- +# Handle command line arguments +#---------------------------------------------------------------------------------------------------------------------- +_KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE} +_TEMP_CONFIG_DIR="null" +_SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git" +_SALT_REPO_URL=${_SALTSTACK_REPO_URL} +_DOWNSTREAM_PKG_REPO=$BS_FALSE +_TEMP_KEYS_DIR="null" +_SLEEP="${__DEFAULT_SLEEP}" 
+_INSTALL_MASTER=$BS_FALSE +_INSTALL_SYNDIC=$BS_FALSE +_INSTALL_MINION=$BS_TRUE +_INSTALL_CLOUD=$BS_FALSE +_VIRTUALENV_DIR=${BS_VIRTUALENV_DIR:-"null"} +_START_DAEMONS=$BS_TRUE +_DISABLE_SALT_CHECKS=$BS_FALSE +_ECHO_DEBUG=${BS_ECHO_DEBUG:-$BS_FALSE} +_CONFIG_ONLY=$BS_FALSE +_PIP_ALLOWED=${BS_PIP_ALLOWED:-$BS_FALSE} +_PIP_ALL=${BS_PIP_ALL:-$BS_FALSE} +_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/etc/salt} +_SALT_CACHE_DIR=${BS_SALT_CACHE_DIR:-/var/cache/salt} +_PKI_DIR=${_SALT_ETC_DIR}/pki +_FORCE_OVERWRITE=${BS_FORCE_OVERWRITE:-$BS_FALSE} +_GENTOO_USE_BINHOST=${BS_GENTOO_USE_BINHOST:-$BS_FALSE} +_EPEL_REPO=${BS_EPEL_REPO:-epel} +_EPEL_REPOS_INSTALLED=$BS_FALSE +_UPGRADE_SYS=${BS_UPGRADE_SYS:-$BS_FALSE} +_INSECURE_DL=${BS_INSECURE_DL:-$BS_FALSE} +_CURL_ARGS=${BS_CURL_ARGS:-} +_FETCH_ARGS=${BS_FETCH_ARGS:-} +_GPG_ARGS=${BS_GPG_ARGS:-} +_WGET_ARGS=${BS_WGET_ARGS:-} +_SALT_MASTER_ADDRESS=${BS_SALT_MASTER_ADDRESS:-null} +_SALT_MINION_ID="null" +# _SIMPLIFY_VERSION is mostly used in Solaris based distributions +_SIMPLIFY_VERSION=$BS_TRUE +_LIBCLOUD_MIN_VERSION="0.14.0" +_EXTRA_PACKAGES="" +_HTTP_PROXY="" +_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt} +_NO_DEPS=$BS_FALSE +_FORCE_SHALLOW_CLONE=$BS_FALSE +_DISABLE_SSL=$BS_FALSE +_DISABLE_REPOS=$BS_FALSE +_CUSTOM_REPO_URL="null" +_CUSTOM_MASTER_CONFIG="null" +_CUSTOM_MINION_CONFIG="null" +_QUIET_GIT_INSTALLATION=$BS_FALSE +_REPO_URL="repo.saltproject.io" +_PY_EXE="python3" +_INSTALL_PY="$BS_FALSE" +_TORNADO_MAX_PY3_VERSION="5.0" +_POST_NEON_INSTALL=$BS_FALSE +_MINIMUM_PIP_VERSION="9.0.1" +_MINIMUM_SETUPTOOLS_VERSION="9.1" +_POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr" + +# Defaults for install arguments +ITYPE="stable" + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __usage +# DESCRIPTION: Display usage information. 
+#---------------------------------------------------------------------------------------------------------------------- +__usage() { + cat << EOT + + Usage : ${__ScriptName} [options] [install-type-args] + + Installation types: + - stable Install latest stable release. This is the default + install type + - stable [branch] Install latest version on a branch. Only supported + for packages available at repo.saltproject.io + - stable [version] Install a specific version. Only supported for + packages available at repo.saltproject.io + To pin a 3xxx minor version, specify it as 3xxx.0 + - testing RHEL-family specific: configure EPEL testing repo + - git Install from the head of the master branch + - git [ref] Install from any git ref (such as a branch, tag, or + commit) + + Examples: + - ${__ScriptName} + - ${__ScriptName} stable + - ${__ScriptName} stable 2017.7 + - ${__ScriptName} stable 2017.7.2 + - ${__ScriptName} testing + - ${__ScriptName} git + - ${__ScriptName} git 2017.7 + - ${__ScriptName} git v2017.7.2 + - ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358 + + Options: + -h Display this message + -v Display script version + -n No colours + -D Show debug output + -c Temporary configuration directory + -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} + -w Install packages from downstream package repository rather than + upstream, saltstack package repository. This is currently only + implemented for SUSE. + -k Temporary directory holding the minion keys which will pre-seed + the master. + -s Sleep time used when waiting for daemons to start, restart and when + checking for the services running. Default: ${__DEFAULT_SLEEP} + -L Also install salt-cloud and required python-libcloud package + -M Also install salt-master + -S Also install salt-syndic + -N Do not install salt-minion + -X Do not start daemons after installation + -d Disables checking if Salt services are enabled to start on system boot. 
+ You can also do this by touching /tmp/disable_salt_checks on the target + host. Default: \${BS_FALSE} + -P Allow pip based installations. On some distributions the required salt + packages or its dependencies are not available as a package for that + distribution. Using this flag allows the script to use pip as a last + resort method. NOTE: This only works for functions which actually + implement pip based installations. + -U If set, fully upgrade the system prior to bootstrapping Salt + -I If set, allow insecure connections while downloading any files. For + example, pass '--no-check-certificate' to 'wget' or '--insecure' to + 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining + GnuPG archive keys insecurely if distro has changed release signatures. + -F Allow copied files to overwrite existing (config, init.d, etc) + -K If set, keep the temporary files in the temporary directories specified + with -c and -k + -C Only run the configuration function. Implies -F (forced overwrite). + To overwrite Master or Syndic configs, -M or -S, respectively, must + also be specified. Salt installation will be ommitted, but some of the + dependencies could be installed to write configuration with -j or -J. + -A Pass the salt-master DNS name or IP. This will be stored under + \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf + -i Pass the salt-minion id. This will be stored under + \${BS_SALT_ETC_DIR}/minion_id + -p Extra-package to install while installing Salt dependencies. One package + per -p flag. You are responsible for providing the proper package name. + -H Use the specified HTTP proxy for all download URLs (including https://). + For example: http://myproxy.example.com:3128 + -b Assume that dependencies are already installed and software sources are + set up. If git is selected, git tree is still checked out as dependency + step. + -f Force shallow cloning for git installations. + This may result in an "n/a" in the version number. 
+ -l Disable ssl checks. When passed, switches "https" calls to "http" where + possible. + -V Install Salt into virtualenv + (only available for Ubuntu based distributions) + -a Pip install all Python pkg dependencies for Salt. Requires -V to install + all pip pkgs into the virtualenv. + (Only available for Ubuntu based distributions) + -r Disable all repository configuration performed by this script. This + option assumes all necessary repository configuration is already present + on the system. + -R Specify a custom repository URL. Assumes the custom repository URL + points to a repository that mirrors Salt packages located at + repo.saltproject.io. The option passed with -R replaces the + "repo.saltproject.io". If -R is passed, -r is also set. Currently only + works on CentOS/RHEL and Debian based distributions. + -J Replace the Master config file with data passed in as a JSON string. If + a Master config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -j Replace the Minion config file with data passed in as a JSON string. If + a Minion config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -q Quiet salt installation from git (setup.py install -q) + -x Changes the Python version used to install Salt. + For CentOS 6 git installations python2.7 is supported. + Fedora git installation, CentOS 7, Debian 9, Ubuntu 16.04 and 18.04 support python3. + -y Installs a different python version on host. Currently this has only been + tested with CentOS 6 and is considered experimental. This will install the + ius repo on the box if disable repo is false. This must be used in conjunction + with -x . 
For example: + sh bootstrap.sh -P -y -x python2.7 git v2017.7.2 + The above will install python27 and install the git version of salt using the + python2.7 executable. This only works for git and pip installations. + +EOT +} # ---------- end of function __usage ---------- + + +while getopts ':hvnDc:g:Gyx:wk:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aq' opt +do + case "${opt}" in + + h ) __usage; exit 0 ;; + v ) echo "$0 -- Version $__ScriptVersion"; exit 0 ;; + n ) _COLORS=0; __detect_color_support ;; + D ) _ECHO_DEBUG=$BS_TRUE ;; + c ) _TEMP_CONFIG_DIR="$OPTARG" ;; + g ) _SALT_REPO_URL=$OPTARG ;; + + G ) echowarn "The '-G' option is DEPRECATED and will be removed in the future stable release!" + echowarn "Bootstrap will always use 'https' protocol to clone from SaltStack GitHub repo." + echowarn "No need to provide this option anymore, now it is a default behavior." + ;; + + w ) _DOWNSTREAM_PKG_REPO=$BS_TRUE ;; + k ) _TEMP_KEYS_DIR="$OPTARG" ;; + s ) _SLEEP=$OPTARG ;; + M ) _INSTALL_MASTER=$BS_TRUE ;; + S ) _INSTALL_SYNDIC=$BS_TRUE ;; + N ) _INSTALL_MINION=$BS_FALSE ;; + X ) _START_DAEMONS=$BS_FALSE ;; + C ) _CONFIG_ONLY=$BS_TRUE ;; + P ) _PIP_ALLOWED=$BS_TRUE ;; + F ) _FORCE_OVERWRITE=$BS_TRUE ;; + U ) _UPGRADE_SYS=$BS_TRUE ;; + K ) _KEEP_TEMP_FILES=$BS_TRUE ;; + I ) _INSECURE_DL=$BS_TRUE ;; + A ) _SALT_MASTER_ADDRESS=$OPTARG ;; + i ) _SALT_MINION_ID=$OPTARG ;; + L ) _INSTALL_CLOUD=$BS_TRUE ;; + p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;; + d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;; + H ) _HTTP_PROXY="$OPTARG" ;; + b ) _NO_DEPS=$BS_TRUE ;; + f ) _FORCE_SHALLOW_CLONE=$BS_TRUE ;; + l ) _DISABLE_SSL=$BS_TRUE ;; + V ) _VIRTUALENV_DIR="$OPTARG" ;; + a ) _PIP_ALL=$BS_TRUE ;; + r ) _DISABLE_REPOS=$BS_TRUE ;; + R ) _CUSTOM_REPO_URL=$OPTARG ;; + J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;; + j ) _CUSTOM_MINION_CONFIG=$OPTARG ;; + q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;; + x ) _PY_EXE="$OPTARG" ;; + y ) _INSTALL_PY="$BS_TRUE" ;; + + \?) 
echo + echoerror "Option does not exist : $OPTARG" + __usage + exit 1 + ;; + + esac # --- end of case --- +done +shift $((OPTIND-1)) + + +# Define our logging file and pipe paths +LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )" +LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )" +# Ensure no residual pipe exists +rm "$LOGPIPE" 2>/dev/null + +# Create our logging pipe +# On FreeBSD we have to use mkfifo instead of mknod +if ! (mknod "$LOGPIPE" p >/dev/null 2>&1 || mkfifo "$LOGPIPE" >/dev/null 2>&1); then + echoerror "Failed to create the named pipe required to log" + exit 1 +fi + +# What ever is written to the logpipe gets written to the logfile +tee < "$LOGPIPE" "$LOGFILE" & + +# Close STDOUT, reopen it directing it to the logpipe +exec 1>&- +exec 1>"$LOGPIPE" +# Close STDERR, reopen it directing it to the logpipe +exec 2>&- +exec 2>"$LOGPIPE" + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __exit_cleanup +# DESCRIPTION: Cleanup any leftovers after script has ended +# +# +# http://www.unix.com/man-page/POSIX/1posix/trap/ +# +# Signal Number Signal Name +# 1 SIGHUP +# 2 SIGINT +# 3 SIGQUIT +# 6 SIGABRT +# 9 SIGKILL +# 14 SIGALRM +# 15 SIGTERM +#---------------------------------------------------------------------------------------------------------------------- +APT_ERR=$(mktemp /tmp/apt_error.XXXXXX) +__exit_cleanup() { + EXIT_CODE=$? 
+ + if [ "$ITYPE" = "git" ] && [ -d "${_SALT_GIT_CHECKOUT_DIR}" ]; then + if [ $_KEEP_TEMP_FILES -eq $BS_FALSE ]; then + # Clean up the checked out repository + echodebug "Cleaning up the Salt Temporary Git Repository" + # shellcheck disable=SC2164 + cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" + rm -rf "${_SALT_GIT_CHECKOUT_DIR}" + #rm -rf "${_SALT_GIT_CHECKOUT_DIR}/deps" + else + echowarn "Not cleaning up the Salt Temporary git repository on request" + echowarn "Note that if you intend to re-run this script using the git approach, you might encounter some issues" + fi + fi + + # Remove the logging pipe when the script exits + if [ -p "$LOGPIPE" ]; then + echodebug "Removing the logging pipe $LOGPIPE" + rm -f "$LOGPIPE" + fi + + # Remove the temporary apt error file when the script exits + if [ -f "$APT_ERR" ]; then + echodebug "Removing the temporary apt error file $APT_ERR" + rm -f "$APT_ERR" + fi + + # Kill tee when exiting, CentOS, at least requires this + # shellcheck disable=SC2009 + TEE_PID=$(ps ax | grep tee | grep "$LOGFILE" | awk '{print $1}') + + [ "$TEE_PID" = "" ] && exit $EXIT_CODE + + echodebug "Killing logging pipe tee's with pid(s): $TEE_PID" + + # We need to trap errors since killing tee will cause a 127 errno + # We also do this as late as possible so we don't "mis-catch" other errors + __trap_errors() { + echoinfo "Errors Trapped: $EXIT_CODE" + # Exit with the "original" exit code, not the trapped code + exit $EXIT_CODE + } + trap "__trap_errors" INT ABRT QUIT TERM + + # Now we're "good" to kill tee + kill -s TERM "$TEE_PID" + + # In case the 127 errno is not triggered, exit with the "original" exit code + exit $EXIT_CODE +} +trap "__exit_cleanup" EXIT INT + + +# Let's discover how we're being called +# shellcheck disable=SC2009 +CALLER=$(ps -a -o pid,args | grep $$ | grep -v grep | tr -s ' ' | cut -d ' ' -f 3) + +if [ "${CALLER}x" = "${0}x" ]; then + CALLER="shell pipe" +fi + +echoinfo "Running version: ${__ScriptVersion}" +echoinfo "Executed by: 
${CALLER}" +echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" +#echowarn "Running the unstable version of ${__ScriptName}" + +# Define installation type +if [ "$#" -gt 0 ];then + __check_unparsed_options "$*" + ITYPE=$1 + shift +fi + +# Check installation type +if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git)')" = "" ]; then + echoerror "Installation type \"$ITYPE\" is not known..." + exit 1 +fi + +# If doing a git install, check what branch/tag/sha will be checked out +if [ "$ITYPE" = "git" ]; then + if [ "$#" -eq 0 ];then + GIT_REV="master" + else + GIT_REV="$1" + shift + fi + + # Disable shell warning about unbound variable during git install + STABLE_REV="latest" + +# If doing stable install, check if version specified +elif [ "$ITYPE" = "stable" ]; then + if [ "$#" -eq 0 ];then + STABLE_REV="latest" + else + if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2|3000|3001|3002|3003|3004)$')" != "" ]; then + STABLE_REV="$1" + shift + elif [ "$(echo "$1" | grep -E '^(2[0-9]*\.[0-9]*\.[0-9]*|[3-9][0-9]{3}(\.[0-9]*)?)$')" != "" ]; then + # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix + STABLE_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') + if [ "$(uname)" != "Darwin" ]; then + STABLE_REV="archive/$STABLE_REV" + fi + shift + else + echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, 3000, 3001, 3002, 3003, 3004, latest, \$MAJOR.\$MINOR.\$PATCH until 2019.2, \$MAJOR or \$MAJOR.\$PATCH starting from 3000)" + exit 1 + fi + fi +fi + +# Check for any unparsed arguments. Should be an error. +if [ "$#" -gt 0 ]; then + __usage + echo + echoerror "Too many arguments." 
+ exit 1 +fi + +# whoami alternative for SunOS +if [ -f /usr/xpg4/bin/id ]; then + whoami='/usr/xpg4/bin/id -un' +else + whoami='whoami' +fi + +# Root permissions are required to run this script +if [ "$($whoami)" != "root" ]; then + echoerror "Salt requires root privileges to install. Please re-run this script as root." + exit 1 +fi + +# Check that we're actually installing one of minion/master/syndic +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echowarn "Nothing to install or configure" + exit 1 +fi + +# Check that we're installing a minion if we're being passed a master address +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MASTER_ADDRESS" != "null" ]; then + echoerror "Don't pass a master address (-A) if no minion is going to be bootstrapped." + exit 1 +fi + +# Check that we're installing a minion if we're being passed a minion id +if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MINION_ID" != "null" ]; then + echoerror "Don't pass a minion id (-i) if no minion is going to be bootstrapped." + exit 1 +fi + +# Check that we're installing or configuring a master if we're being passed a master config json dict +if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + if [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoerror "Don't pass a master config JSON dict (-J) if no master is going to be bootstrapped or configured." + exit 1 + fi +fi + +# Check that we're installing or configuring a minion if we're being passed a minion config json dict +if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoerror "Don't pass a minion config JSON dict (-j) if no minion is going to be bootstrapped or configured." 
+ exit 1 + fi +fi + +# Check if we're installing via a different Python executable and set major version variables +if [ -n "$_PY_EXE" ]; then + if [ "$(uname)" = "Darwin" ]; then + _PY_PKG_VER=$(echo "$_PY_EXE" | sed "s/\\.//g") + else + _PY_PKG_VER=$(echo "$_PY_EXE" | sed -E "s/\\.//g") + fi + + _PY_MAJOR_VERSION=$(echo "$_PY_PKG_VER" | cut -c 7) + if [ "$_PY_MAJOR_VERSION" != 3 ] && [ "$_PY_MAJOR_VERSION" != 2 ]; then + echoerror "Detected -x option, but Python major version is not 2 or 3." + echoerror "The -x option must be passed as python2, python27, or python2.7 (or use the Python '3' versions of examples)." + exit 1 + fi + + if [ "$_PY_EXE" != "python3" ]; then + echoinfo "Detected -x option. Using $_PY_EXE to install Salt." + fi +else + _PY_PKG_VER="" + _PY_MAJOR_VERSION="" +fi + +# If the configuration directory or archive does not exist, error out +if [ "$_TEMP_CONFIG_DIR" != "null" ]; then + _TEMP_CONFIG_DIR="$(__check_config_dir "$_TEMP_CONFIG_DIR")" + [ "$_TEMP_CONFIG_DIR" = "null" ] && exit 1 +fi + +# If the pre-seed keys directory does not exist, error out +if [ "$_TEMP_KEYS_DIR" != "null" ] && [ ! -d "$_TEMP_KEYS_DIR" ]; then + echoerror "The pre-seed keys directory ${_TEMP_KEYS_DIR} does not exist." + exit 1 +fi + +# -a and -V only work from git +if [ "$ITYPE" != "git" ]; then + if [ $_PIP_ALL -eq $BS_TRUE ]; then + echoerror "Pip installing all python packages with -a is only possible when installing Salt via git" + exit 1 + fi + if [ "$_VIRTUALENV_DIR" != "null" ]; then + echoerror "Virtualenv installs via -V is only possible when installing Salt via git" + exit 1 + fi +fi + +# Set the _REPO_URL value based on if -R was passed or not. Defaults to repo.saltproject.io. +if [ "$_CUSTOM_REPO_URL" != "null" ]; then + _REPO_URL="$_CUSTOM_REPO_URL" + + # Check for -r since -R is being passed. Set -r with a warning. + if [ "$_DISABLE_REPOS" -eq $BS_FALSE ]; then + echowarn "Detected -R option. No other repositories will be configured when -R is used. 
Setting -r option to True." + _DISABLE_REPOS=$BS_TRUE + fi +fi + +# Check the _DISABLE_SSL value and set HTTP or HTTPS. +if [ "$_DISABLE_SSL" -eq $BS_TRUE ]; then + HTTP_VAL="http" +else + HTTP_VAL="https" +fi + +# Check the _QUIET_GIT_INSTALLATION value and set SETUP_PY_INSTALL_ARGS. +if [ "$_QUIET_GIT_INSTALLATION" -eq $BS_TRUE ]; then + SETUP_PY_INSTALL_ARGS="-q" +else + SETUP_PY_INSTALL_ARGS="" +fi + +# Handle the insecure flags +if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then + _CURL_ARGS="${_CURL_ARGS} --insecure" + _FETCH_ARGS="${_FETCH_ARGS} --no-verify-peer" + _GPG_ARGS="${_GPG_ARGS} --keyserver-options no-check-cert" + _WGET_ARGS="${_WGET_ARGS} --no-check-certificate" +else + _GPG_ARGS="${_GPG_ARGS} --keyserver-options ca-cert-file=/etc/ssl/certs/ca-certificates.crt" +fi + +# Export the http_proxy configuration to our current environment +if [ "${_HTTP_PROXY}" != "" ]; then + export http_proxy="${_HTTP_PROXY}" + export https_proxy="${_HTTP_PROXY}" + # Using "deprecated" option here, but that appears the only way to make it work. 
+ # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=818802 + # and https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1625848 + _GPG_ARGS="${_GPG_ARGS},http-proxy=${_HTTP_PROXY}" +fi + +# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394 +if [ "${_DISABLE_SALT_CHECKS}" -eq $BS_FALSE ] && [ -f /tmp/disable_salt_checks ]; then + # shellcheck disable=SC2016 + echowarn 'Found file: /tmp/disable_salt_checks, setting _DISABLE_SALT_CHECKS=$BS_TRUE' + _DISABLE_SALT_CHECKS=$BS_TRUE +fi + +# Because -a can only be installed into virtualenv +if [ "${_PIP_ALL}" -eq $BS_TRUE ] && [ "${_VIRTUALENV_DIR}" = "null" ]; then + usage + # Could possibly set up a default virtualenv location when -a flag is passed + echoerror "Using -a requires -V because pip pkgs should be siloed from python system pkgs" + exit 1 +fi + +# Make sure virtualenv directory does not already exist +if [ -d "${_VIRTUALENV_DIR}" ]; then + echoerror "The directory ${_VIRTUALENV_DIR} for virtualenv already exists" + exit 1 +fi + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __fetch_url +# DESCRIPTION: Retrieves a URL and writes it to a given path +#---------------------------------------------------------------------------------------------------------------------- +__fetch_url() { + # shellcheck disable=SC2086 + curl $_CURL_ARGS -L -s -f -o "$1" "$2" >/dev/null 2>&1 || + wget $_WGET_ARGS -q -O "$1" "$2" >/dev/null 2>&1 || + fetch $_FETCH_ARGS -q -o "$1" "$2" >/dev/null 2>&1 || # FreeBSD + fetch -q -o "$1" "$2" >/dev/null 2>&1 || # Pre FreeBSD 10 + ftp -o "$1" "$2" >/dev/null 2>&1 || # OpenBSD + (echoerror "$2 failed to download to $1"; exit 1) +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __fetch_verify +# DESCRIPTION: Retrieves a URL, verifies its content and writes it to standard 
output +#---------------------------------------------------------------------------------------------------------------------- +__fetch_verify() { + fetch_verify_url="$1" + fetch_verify_sum="$2" + fetch_verify_size="$3" + + fetch_verify_tmpf=$(mktemp) && \ + __fetch_url "$fetch_verify_tmpf" "$fetch_verify_url" && \ + test "$(stat --format=%s "$fetch_verify_tmpf")" -eq "$fetch_verify_size" && \ + test "$(md5sum "$fetch_verify_tmpf" | awk '{ print $1 }')" = "$fetch_verify_sum" && \ + cat "$fetch_verify_tmpf" && \ + if rm -f "$fetch_verify_tmpf"; then + return 0 + fi + echo "Failed verification of $fetch_verify_url" + return 1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_hardware_info +# DESCRIPTION: Discover hardware information +#---------------------------------------------------------------------------------------------------------------------- +__gather_hardware_info() { + if [ -f /proc/cpuinfo ]; then + CPU_VENDOR_ID=$(awk '/vendor_id|Processor/ {sub(/-.*$/,"",$3); print $3; exit}' /proc/cpuinfo ) + elif [ -f /usr/bin/kstat ]; then + # SmartOS. + # Solaris!? 
+ # This has only been tested for a GenuineIntel CPU + CPU_VENDOR_ID=$(/usr/bin/kstat -p cpu_info:0:cpu_info0:vendor_id | awk '{print $2}') + else + CPU_VENDOR_ID=$( sysctl -n hw.model ) + fi + # shellcheck disable=SC2034 + CPU_VENDOR_ID_L=$( echo "$CPU_VENDOR_ID" | tr '[:upper:]' '[:lower:]' ) + CPU_ARCH=$(uname -m 2>/dev/null || uname -p 2>/dev/null || echo "unknown") + CPU_ARCH_L=$( echo "$CPU_ARCH" | tr '[:upper:]' '[:lower:]' ) +} +__gather_hardware_info + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_os_info +# DESCRIPTION: Discover operating system information +#---------------------------------------------------------------------------------------------------------------------- +__gather_os_info() { + OS_NAME=$(uname -s 2>/dev/null) + OS_NAME_L=$( echo "$OS_NAME" | tr '[:upper:]' '[:lower:]' ) + OS_VERSION=$(uname -r) + # shellcheck disable=SC2034 + OS_VERSION_L=$( echo "$OS_VERSION" | tr '[:upper:]' '[:lower:]' ) +} +__gather_os_info + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __parse_version_string +# DESCRIPTION: Parse version strings ignoring the revision. 
+# MAJOR.MINOR.REVISION becomes MAJOR.MINOR +#---------------------------------------------------------------------------------------------------------------------- +__parse_version_string() { + VERSION_STRING="$1" + PARSED_VERSION=$( + echo "$VERSION_STRING" | + sed -e 's/^/#/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\)\(\.[0-9][0-9]*\).*$/\1/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\).*$/\1/' \ + -e 's/^#[^0-9]*\([0-9][0-9]*\).*$/\1/' \ + -e 's/^#.*$//' + ) + echo "$PARSED_VERSION" +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __derive_debian_numeric_version +# DESCRIPTION: Derive the numeric version from a Debian version string. +#---------------------------------------------------------------------------------------------------------------------- +__derive_debian_numeric_version() { + NUMERIC_VERSION="" + INPUT_VERSION="$1" + if echo "$INPUT_VERSION" | grep -q '^[0-9]'; then + NUMERIC_VERSION="$INPUT_VERSION" + elif [ -z "$INPUT_VERSION" ] && [ -f "/etc/debian_version" ]; then + INPUT_VERSION="$(cat /etc/debian_version)" + fi + if [ -z "$NUMERIC_VERSION" ]; then + if [ "$INPUT_VERSION" = "wheezy/sid" ]; then + # I've found an EC2 wheezy image which did not tell its version + NUMERIC_VERSION=$(__parse_version_string "7.0") + elif [ "$INPUT_VERSION" = "jessie/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "8.0") + elif [ "$INPUT_VERSION" = "stretch/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "9.0") + elif [ "$INPUT_VERSION" = "buster/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "10.0") + elif [ "$INPUT_VERSION" = "bullseye/sid" ]; then + NUMERIC_VERSION=$(__parse_version_string "11.0") + else + echowarn "Unable to parse the Debian Version (codename: '$INPUT_VERSION')" + fi + fi + echo "$NUMERIC_VERSION" +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# 
NAME: __unquote_string +# DESCRIPTION: Strip single or double quotes from the provided string. +#---------------------------------------------------------------------------------------------------------------------- +__unquote_string() { + # shellcheck disable=SC1117 + echo "$*" | sed -e "s/^\([\"\']\)\(.*\)\1\$/\2/g" +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __camelcase_split +# DESCRIPTION: Convert 'CamelCased' strings to 'Camel Cased' +#---------------------------------------------------------------------------------------------------------------------- +__camelcase_split() { + echo "$*" | sed -e 's/\([^[:upper:][:punct:]]\)\([[:upper:]]\)/\1 \2/g' +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __strip_duplicates +# DESCRIPTION: Strip duplicate strings +#---------------------------------------------------------------------------------------------------------------------- +__strip_duplicates() { + echo "$*" | tr -s '[:space:]' '\n' | awk '!x[$0]++' +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __sort_release_files +# DESCRIPTION: Custom sort function. Alphabetical or numerical sort is not +# enough. 
+#---------------------------------------------------------------------------------------------------------------------- +__sort_release_files() { + KNOWN_RELEASE_FILES=$(echo "(arch|alpine|centos|debian|ubuntu|fedora|redhat|suse|\ + mandrake|mandriva|gentoo|slackware|turbolinux|unitedlinux|void|lsb|system|\ + oracle|os)(-|_)(release|version)" | sed -E 's:[[:space:]]::g') + primary_release_files="" + secondary_release_files="" + # Sort know VS un-known files first + for release_file in $(echo "${@}" | sed -E 's:[[:space:]]:\n:g' | sort -f | uniq); do + match=$(echo "$release_file" | grep -E -i "${KNOWN_RELEASE_FILES}") + if [ "${match}" != "" ]; then + primary_release_files="${primary_release_files} ${release_file}" + else + secondary_release_files="${secondary_release_files} ${release_file}" + fi + done + + # Now let's sort by know files importance, max important goes last in the max_prio list + max_prio="redhat-release centos-release oracle-release fedora-release" + for entry in $max_prio; do + if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then + primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\2 \\1 \\3:g") + fi + done + # Now, least important goes last in the min_prio list + min_prio="lsb-release" + for entry in $min_prio; do + if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then + primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\1 \\3 \\2:g") + fi + done + + # Echo the results collapsing multiple white-space into a single white-space + echo "${primary_release_files} ${secondary_release_files}" | sed -E 's:[[:space:]]+:\n:g' +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_linux_system_info +# DESCRIPTION: Discover Linux system information 
+#---------------------------------------------------------------------------------------------------------------------- +__gather_linux_system_info() { + DISTRO_NAME="" + DISTRO_VERSION="" + + # Let's test if the lsb_release binary is available + rv=$(lsb_release >/dev/null 2>&1) + + # shellcheck disable=SC2181 + if [ $? -eq 0 ]; then + DISTRO_NAME=$(lsb_release -si) + if [ "${DISTRO_NAME}" = "Scientific" ]; then + DISTRO_NAME="Scientific Linux" + elif [ "$(echo "$DISTRO_NAME" | grep ^CloudLinux)" != "" ]; then + DISTRO_NAME="Cloud Linux" + elif [ "$(echo "$DISTRO_NAME" | grep ^RedHat)" != "" ]; then + # Let's convert 'CamelCased' to 'Camel Cased' + n=$(__camelcase_split "$DISTRO_NAME") + # Skip setting DISTRO_NAME this time, splitting CamelCase has failed. + # See https://github.com/saltstack/salt-bootstrap/issues/918 + [ "$n" = "$DISTRO_NAME" ] && DISTRO_NAME="" || DISTRO_NAME="$n" + elif [ "$( echo "${DISTRO_NAME}" | grep openSUSE )" != "" ]; then + # lsb_release -si returns "openSUSE Tumbleweed" on openSUSE tumbleweed + # lsb_release -si returns "openSUSE project" on openSUSE 12.3 + # lsb_release -si returns "openSUSE" on openSUSE 15.n + DISTRO_NAME="opensuse" + elif [ "${DISTRO_NAME}" = "SUSE LINUX" ]; then + if [ "$(lsb_release -sd | grep -i opensuse)" != "" ]; then + # openSUSE 12.2 reports SUSE LINUX on lsb_release -si + DISTRO_NAME="opensuse" + else + # lsb_release -si returns "SUSE LINUX" on SLES 11 SP3 + DISTRO_NAME="suse" + fi + elif [ "${DISTRO_NAME}" = "EnterpriseEnterpriseServer" ]; then + # This the Oracle Linux Enterprise ID before ORACLE LINUX 5 UPDATE 3 + DISTRO_NAME="Oracle Linux" + elif [ "${DISTRO_NAME}" = "OracleServer" ]; then + # This the Oracle Linux Server 6.5 + DISTRO_NAME="Oracle Linux" + elif [ "${DISTRO_NAME}" = "AmazonAMI" ] || [ "${DISTRO_NAME}" = "Amazon" ]; then + DISTRO_NAME="Amazon Linux AMI" + elif [ "${DISTRO_NAME}" = "ManjaroLinux" ]; then + DISTRO_NAME="Arch Linux" + elif [ "${DISTRO_NAME}" = "Arch" ]; then + 
DISTRO_NAME="Arch Linux" + return + fi + rv=$(lsb_release -sr) + [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv") + elif [ -f /etc/lsb-release ]; then + # We don't have the lsb_release binary, though, we do have the file it parses + DISTRO_NAME=$(grep DISTRIB_ID /etc/lsb-release | sed -e 's/.*=//') + rv=$(grep DISTRIB_RELEASE /etc/lsb-release | sed -e 's/.*=//') + [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv") + fi + + if [ "$DISTRO_NAME" != "" ] && [ "$DISTRO_VERSION" != "" ]; then + # We already have the distribution name and version + return + fi + # shellcheck disable=SC2035,SC2086 + for rsource in $(__sort_release_files "$( + cd /etc && /bin/ls *[_-]release *[_-]version 2>/dev/null | env -i sort | \ + sed -e '/^redhat-release$/d' -e '/^lsb-release$/d'; \ + echo redhat-release lsb-release + )"); do + + [ ! -f "/etc/${rsource}" ] && continue # Does not exist + + n=$(echo "${rsource}" | sed -e 's/[_-]release$//' -e 's/[_-]version$//') + shortname=$(echo "${n}" | tr '[:upper:]' '[:lower:]') + if [ "$shortname" = "debian" ]; then + rv=$(__derive_debian_numeric_version "$(cat /etc/${rsource})") + else + rv=$( (grep VERSION "/etc/${rsource}"; cat "/etc/${rsource}") | grep '[0-9]' | sed -e 'q' ) + fi + [ "${rv}" = "" ] && [ "$shortname" != "arch" ] && continue # There's no version information. 
Continue to next rsource
+        v=$(__parse_version_string "$rv")
+        case $shortname in
+            redhat )
+                if [ "$(grep -E 'CentOS' /etc/${rsource})" != "" ]; then
+                    n="CentOS"
+                elif [ "$(grep -E 'Scientific' /etc/${rsource})" != "" ]; then
+                    n="Scientific Linux"
+                elif [ "$(grep -E 'Red Hat Enterprise Linux' /etc/${rsource})" != "" ]; then
+                    n="<R>ed <H>at <E>nterprise <L>inux"
+                else
+                    n="<R>ed <H>at <L>inux"
+                fi
+                ;;
+            arch ) n="Arch Linux" ;;
+            alpine ) n="Alpine Linux" ;;
+            centos ) n="CentOS" ;;
+            debian ) n="Debian" ;;
+            ubuntu ) n="Ubuntu" ;;
+            fedora ) n="Fedora" ;;
+            suse|opensuse ) n="SUSE" ;;
+            mandrake*|mandriva ) n="Mandriva" ;;
+            gentoo ) n="Gentoo" ;;
+            slackware ) n="Slackware" ;;
+            turbolinux ) n="TurboLinux" ;;
+            unitedlinux ) n="UnitedLinux" ;;
+            void ) n="VoidLinux" ;;
+            oracle ) n="Oracle Linux" ;;
+            system )
+                while read -r line; do
+                    [ "${n}x" != "systemx" ] && break
+                    case "$line" in
+                        *Amazon*Linux*AMI*)
+                            n="Amazon Linux AMI"
+                            break
+                    esac
+                done < "/etc/${rsource}"
+                ;;
+            os )
+                nn="$(__unquote_string "$(grep '^ID=' /etc/os-release | sed -e 's/^ID=\(.*\)$/\1/g')")"
+                rv="$(__unquote_string "$(grep '^VERSION_ID=' /etc/os-release | sed -e 's/^VERSION_ID=\(.*\)$/\1/g')")"
+                [ "${rv}" != "" ] && v=$(__parse_version_string "$rv") || v=""
+                case $(echo "${nn}" | tr '[:upper:]' '[:lower:]') in
+                    alpine )
+                        n="Alpine Linux"
+                        v="${rv}"
+                        ;;
+                    amzn )
+                        # Amazon AMI's after 2014.09 match here
+                        n="Amazon Linux AMI"
+                        ;;
+                    arch )
+                        n="Arch Linux"
+                        v="" # Arch Linux does not provide a version.
+ ;; + cloudlinux ) + n="Cloud Linux" + ;; + debian ) + n="Debian" + v=$(__derive_debian_numeric_version "$v") + ;; + sles ) + n="SUSE" + v="${rv}" + ;; + opensuse-* ) + n="opensuse" + v="${rv}" + ;; + * ) + n=${nn} + ;; + esac + ;; + * ) n="${n}" ; + esac + DISTRO_NAME=$n + DISTRO_VERSION=$v + break + done +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_python() +# DESCRIPTION: Install a different version of python on a host. Currently this has only been tested on CentOS 6 and +# is considered experimental. +#---------------------------------------------------------------------------------------------------------------------- +__install_python() { + if [ "$_PY_EXE" = "" ]; then + echoerror "Must specify -x with -y to install a specific python version" + exit 1 + fi + + __PACKAGES="$_PY_PKG_VER" + + if [ ${_DISABLE_REPOS} -eq ${BS_FALSE} ]; then + echoinfo "Attempting to install a repo to help provide a separate python package" + echoinfo "$DISTRO_NAME_L" + case "$DISTRO_NAME_L" in + "red_hat"|"centos") + __PYTHON_REPO_URL="https://repo.ius.io/ius-release-el${DISTRO_MAJOR_VERSION}.rpm" + ;; + *) + echoerror "Installing a repo to provide a python package is only supported on Redhat/CentOS. + If a repo is already available, please try running script with -r." 
+ exit 1 + ;; + esac + + echoinfo "Installing IUS repo" + __yum_install_noinput "${__PYTHON_REPO_URL}" || return 1 + fi + + echoinfo "Installing ${__PACKAGES}" + __yum_install_noinput "${__PACKAGES}" || return 1 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_sunos_system_info +# DESCRIPTION: Discover SunOS system info +#---------------------------------------------------------------------------------------------------------------------- +__gather_sunos_system_info() { + if [ -f /sbin/uname ]; then + DISTRO_VERSION=$(/sbin/uname -X | awk '/[kK][eE][rR][nN][eE][lL][iI][dD]/ { print $3 }') + fi + + DISTRO_NAME="" + if [ -f /etc/release ]; then + while read -r line; do + [ "${DISTRO_NAME}" != "" ] && break + case "$line" in + *OpenIndiana*oi_[0-9]*) + DISTRO_NAME="OpenIndiana" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/OpenIndiana(.*)oi_([[:digit:]]+)(.*)/\\2/p") + break + ;; + *OpenSolaris*snv_[0-9]*) + DISTRO_NAME="OpenSolaris" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/OpenSolaris(.*)snv_([[:digit:]]+)(.*)/\\2/p") + break + ;; + *Oracle*Solaris*[0-9]*) + DISTRO_NAME="Oracle Solaris" + DISTRO_VERSION=$(echo "$line" | sed -nE "s/(Oracle Solaris) ([[:digit:]]+)(.*)/\\2/p") + break + ;; + *Solaris*) + DISTRO_NAME="Solaris" + # Let's make sure we not actually on a Joyent's SmartOS VM since some releases + # don't have SmartOS in `/etc/release`, only `Solaris` + if uname -v | grep joyent >/dev/null 2>&1; then + DISTRO_NAME="SmartOS" + fi + break + ;; + *NexentaCore*) + DISTRO_NAME="Nexenta Core" + break + ;; + *SmartOS*) + DISTRO_NAME="SmartOS" + break + ;; + *OmniOS*) + DISTRO_NAME="OmniOS" + DISTRO_VERSION=$(echo "$line" | awk '{print $3}') + _SIMPLIFY_VERSION=$BS_FALSE + break + ;; + esac + done < /etc/release + fi + + if [ "${DISTRO_NAME}" = "" ]; then + DISTRO_NAME="Solaris" + DISTRO_VERSION=$( + echo "${OS_VERSION}" | + sed -e 's;^4\.;1.;' \ + -e 
's;^5\.\([0-6]\)[^0-9]*$;2.\1;' \ + -e 's;^5\.\([0-9][0-9]*\).*;\1;' + ) + fi + + if [ "${DISTRO_NAME}" = "SmartOS" ]; then + VIRTUAL_TYPE="smartmachine" + if [ "$(zonename)" = "global" ]; then + VIRTUAL_TYPE="global" + fi + fi +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_bsd_system_info +# DESCRIPTION: Discover OpenBSD, NetBSD and FreeBSD systems information +#---------------------------------------------------------------------------------------------------------------------- +__gather_bsd_system_info() { + DISTRO_NAME=${OS_NAME} + DISTRO_VERSION=$(echo "${OS_VERSION}" | sed -e 's;[()];;' -e 's/-.*$//') +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_osx_system_info +# DESCRIPTION: Discover MacOS X +#---------------------------------------------------------------------------------------------------------------------- +__gather_osx_system_info() { + DISTRO_NAME="MacOSX" + DISTRO_VERSION=$(sw_vers -productVersion) +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __gather_system_info +# DESCRIPTION: Discover which system and distribution we are running. 
#----------------------------------------------------------------------------------------------------------------------
__gather_system_info() {
    # Dispatch to the per-OS discovery helper based on the lowercased kernel
    # name; unknown platforms are a hard error.
    case ${OS_NAME_L} in
        linux )
            __gather_linux_system_info
            ;;
        sunos )
            __gather_sunos_system_info
            ;;
        openbsd|freebsd|netbsd )
            __gather_bsd_system_info
            ;;
        darwin )
            __gather_osx_system_info
            ;;
        * )
            echoerror "${OS_NAME} not supported.";
            exit 1
            ;;
    esac

}


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __ubuntu_derivatives_translation
#   DESCRIPTION:  Map Ubuntu derivatives to their Ubuntu base versions.
#                 If distro has a known Ubuntu base version, use those install
#                 functions by pretending to be Ubuntu (i.e. change global vars)
#----------------------------------------------------------------------------------------------------------------------
# shellcheck disable=SC2034
__ubuntu_derivatives_translation() {
    UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon)"
    # Mappings
    trisquel_6_ubuntu_base="12.04"
    linuxmint_13_ubuntu_base="12.04"
    linuxmint_17_ubuntu_base="14.04"
    linuxmint_18_ubuntu_base="16.04"
    linuxmint_19_ubuntu_base="18.04"
    linuxmint_20_ubuntu_base="20.04"
    linaro_12_ubuntu_base="12.04"
    elementary_os_02_ubuntu_base="12.04"
    neon_16_ubuntu_base="16.04"
    neon_18_ubuntu_base="18.04"
    neon_20_ubuntu_base="20.04"

    # Translate Ubuntu derivatives to their base Ubuntu version
    match=$(echo "$DISTRO_NAME_L" | grep -E ${UBUNTU_DERIVATIVES})

    if [ "${match}" != "" ]; then
        case $match in
            "elementary_os")
                _major=$(echo "$DISTRO_VERSION" | sed 's/\.//g')
                ;;
            "linuxmint")
                export LSB_ETC_LSB_RELEASE=/etc/upstream-release/lsb-release
                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
                ;;
            *)
                _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')
                ;;
        esac

        # Look up e.g. $linuxmint_19_ubuntu_base via indirect expansion
        _ubuntu_version=$(eval echo "\$${match}_${_major}_ubuntu_base")

        if [ "$_ubuntu_version" != "" ]; then
            echodebug "Detected Ubuntu $_ubuntu_version derivative"
            DISTRO_NAME_L="ubuntu"
            DISTRO_VERSION="$_ubuntu_version"
        fi
    fi
}


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __check_dpkg_architecture
#   DESCRIPTION:  Determine the primary architecture for packages to install on Debian and derivatives
#                 and issue all necessary error messages.
#----------------------------------------------------------------------------------------------------------------------
__check_dpkg_architecture() {
    if __check_command_exists dpkg; then
        DPKG_ARCHITECTURE="$(dpkg --print-architecture)"
    else
        echoerror "dpkg: command not found."
        return 1
    fi

    __REPO_ARCH="$DPKG_ARCHITECTURE"
    __REPO_ARCH_DEB='deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg]'
    __return_code=0

    case $DPKG_ARCHITECTURE in
        "i386")
            error_msg="$_REPO_URL likely doesn't have all required 32-bit packages for $DISTRO_NAME $DISTRO_MAJOR_VERSION."
            # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location
            __REPO_ARCH="amd64"
            ;;
        "amd64")
            error_msg=""
            ;;
        "arm64")
            if [ "$_CUSTOM_REPO_URL" != "null" ]; then
                warn_msg="Support for arm64 is experimental, make sure the custom repository used has the expected structure and contents."
            else
                # Saltstack official repository does not yet have arm64 metadata,
                # use amd64 repositories on arm64, since all pkgs are arch-independent
                __REPO_ARCH="amd64"
                __REPO_ARCH_DEB="deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg arch=$__REPO_ARCH]"
                warn_msg="Support for arm64 packages is experimental and might rely on architecture-independent packages from the amd64 repository."
            fi
            error_msg=""
            ;;
        "armhf")
            if [ "$DISTRO_NAME_L" = "ubuntu" ] || [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then
                error_msg="Support for armhf packages at $_REPO_URL is limited to Debian/Raspbian 8 platforms."
                __return_code=1
            else
                error_msg=""
            fi
            ;;
        *)
            error_msg="$_REPO_URL doesn't have packages for your system architecture: $DPKG_ARCHITECTURE."
            __return_code=1
            ;;
    esac

    if [ "${warn_msg:-}" != "" ]; then
        # AArch64: Do not fail at this point, but warn the user about experimental support
        # See https://github.com/saltstack/salt-bootstrap/issues/1240
        echowarn "${warn_msg}"
    fi
    if [ "${error_msg}" != "" ]; then
        echoerror "${error_msg}"
        if [ "$ITYPE" != "git" ]; then
            echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2017.7.2."
            echoerror "It may be necessary to use git installation mode with pip and disable the SaltStack apt repository."
            echoerror "For example:"
            echoerror "    sh ${__ScriptName} -r -P git v2017.7.2"
        fi
    fi

    if [ "${__return_code}" -eq 0 ]; then
        return 0
    else
        return 1
    fi
}


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __ubuntu_codename_translation
#   DESCRIPTION:  Map Ubuntu major versions to their corresponding codenames
#----------------------------------------------------------------------------------------------------------------------
# shellcheck disable=SC2034
__ubuntu_codename_translation() {
    # Ubuntu releases in April (.04) and October (.10); track which this is.
    case $DISTRO_MINOR_VERSION in
        "04")
            _april="yes"
            ;;
        "10")
            _april=""
            ;;
        *)
            _april="yes"
            ;;
    esac

    case $DISTRO_MAJOR_VERSION in
        "12")
            DISTRO_CODENAME="precise"
            ;;
        "14")
            DISTRO_CODENAME="trusty"
            ;;
        "16")
            DISTRO_CODENAME="xenial"
            ;;
        "18")
            DISTRO_CODENAME="bionic"
            ;;
        "20")
            DISTRO_CODENAME="focal"
            ;;
        "21")
            DISTRO_CODENAME="hirsute"
            ;;
        *)
            DISTRO_CODENAME="trusty"
            ;;
    esac
}


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------- +# NAME: __debian_derivatives_translation +# DESCRIPTION: Map Debian derivatives to their Debian base versions. +# If distro has a known Debian base version, use those install +# functions by pretending to be Debian (i.e. change global vars) +#---------------------------------------------------------------------------------------------------------------------- +# shellcheck disable=SC2034 +__debian_derivatives_translation() { + # If the file does not exist, return + [ ! -f /etc/os-release ] && return + + DEBIAN_DERIVATIVES="(cumulus|devuan|kali|linuxmint|raspbian|bunsenlabs|turnkey)" + # Mappings + cumulus_2_debian_base="7.0" + cumulus_3_debian_base="8.0" + cumulus_4_debian_base="10.0" + devuan_1_debian_base="8.0" + devuan_2_debian_base="9.0" + kali_1_debian_base="7.0" + linuxmint_1_debian_base="8.0" + raspbian_8_debian_base="8.0" + raspbian_9_debian_base="9.0" + raspbian_10_debian_base="10.0" + bunsenlabs_9_debian_base="9.0" + turnkey_9_debian_base="9.0" + + # Translate Debian derivatives to their base Debian version + match=$(echo "$DISTRO_NAME_L" | grep -E ${DEBIAN_DERIVATIVES}) + + if [ "${match}" != "" ]; then + case $match in + cumulus*) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + _debian_derivative="cumulus" + ;; + devuan) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + _debian_derivative="devuan" + ;; + kali) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + _debian_derivative="kali" + ;; + linuxmint) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + _debian_derivative="linuxmint" + ;; + raspbian) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + _debian_derivative="raspbian" + ;; + bunsenlabs) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + _debian_derivative="bunsenlabs" + ;; + turnkey) + _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') 
+ _debian_derivative="turnkey" + ;; + esac + + _debian_version=$(eval echo "\$${_debian_derivative}_${_major}_debian_base" 2>/dev/null) + + if [ "$_debian_version" != "" ]; then + echodebug "Detected Debian $_debian_version derivative" + DISTRO_NAME_L="debian" + DISTRO_VERSION="$_debian_version" + DISTRO_MAJOR_VERSION="$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')" + fi + fi +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __debian_codename_translation +# DESCRIPTION: Map Debian major versions to their corresponding code names +#---------------------------------------------------------------------------------------------------------------------- +# shellcheck disable=SC2034 +__debian_codename_translation() { + + case $DISTRO_MAJOR_VERSION in + "9") + DISTRO_CODENAME="stretch" + ;; + "10") + DISTRO_CODENAME="buster" + ;; + "11") + DISTRO_CODENAME="bullseye" + ;; + *) + DISTRO_CODENAME="stretch" + ;; + esac +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_end_of_life_versions +# DESCRIPTION: Check for end of life distribution versions +#---------------------------------------------------------------------------------------------------------------------- +__check_end_of_life_versions() { + case "${DISTRO_NAME_L}" in + debian) + # Debian versions below 9 are not supported + if [ "$DISTRO_MAJOR_VERSION" -lt 9 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. 
See:" + echoerror " https://wiki.debian.org/DebianReleases" + exit 1 + fi + ;; + + ubuntu) + # Ubuntu versions not supported + # + # < 16.04 + # = 16.10 + # = 17.04, 17.10 + # = 18.10 + # = 19.04, 19.10 + if [ "$DISTRO_MAJOR_VERSION" -lt 16 ] || \ + [ "$DISTRO_MAJOR_VERSION" -eq 17 ] || \ + [ "$DISTRO_MAJOR_VERSION" -eq 19 ] || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://wiki.ubuntu.com/Releases" + exit 1 + fi + ;; + + opensuse) + # openSUSE versions not supported + # + # <= 13.X + # <= 42.2 + if [ "$DISTRO_MAJOR_VERSION" -lt 15 ] || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 42 ] && [ "$DISTRO_MINOR_VERSION" -le 2 ]; }; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " http://en.opensuse.org/Lifetime" + exit 1 + fi + ;; + + suse) + # SuSE versions not supported + # + # < 11 SP4 + # < 12 SP2 + # < 15 SP1 + SUSE_PATCHLEVEL=$(awk -F'=' '/VERSION_ID/ { print $2 }' /etc/os-release | grep -oP "\.\K\w+") + if [ "${SUSE_PATCHLEVEL}" = "" ]; then + SUSE_PATCHLEVEL="00" + fi + if [ "$DISTRO_MAJOR_VERSION" -lt 11 ] || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 11 ] && [ "$SUSE_PATCHLEVEL" -lt 04 ]; } || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 15 ] && [ "$SUSE_PATCHLEVEL" -lt 01 ]; } || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$SUSE_PATCHLEVEL" -lt 02 ]; }; then + echoerror "Versions lower than SuSE 11 SP4, 12 SP2 or 15 SP1 are not supported." 
+ echoerror "Please consider upgrading to the next stable" + echoerror " https://www.suse.com/lifecycle/" + exit 1 + fi + ;; + + fedora) + # Fedora lower than 33 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 33 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://fedoraproject.org/wiki/Releases" + exit 1 + fi + ;; + + centos) + # CentOS versions lower than 7 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " http://wiki.centos.org/Download" + exit 1 + fi + ;; + + red_hat*linux) + # Red Hat (Enterprise) Linux versions lower than 7 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://access.redhat.com/support/policy/updates/errata/" + exit 1 + fi + ;; + + oracle*linux) + # Oracle Linux versions lower than 7 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " http://www.oracle.com/us/support/library/elsp-lifetime-069338.pdf" + exit 1 + fi + ;; + + scientific*linux) + # Scientific Linux versions lower than 7 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://www.scientificlinux.org/downloads/sl-versions/" + exit 1 + fi + ;; + + cloud*linux) + # Cloud Linux versions lower than 7 are no longer supported + if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then + echoerror "End of life distributions are not supported." 
+ echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://docs.cloudlinux.com/index.html?cloudlinux_life-cycle.html" + exit 1 + fi + ;; + + amazon*linux*ami) + # Amazon Linux versions 2018.XX and lower no longer supported + # Except for Amazon Linux 2, which reset the major version counter + if [ "$DISTRO_MAJOR_VERSION" -le 2018 ] && [ "$DISTRO_MAJOR_VERSION" -gt 10 ]; then + echoerror "End of life distributions are not supported." + echoerror "Please consider upgrading to the next stable. See:" + echoerror " https://aws.amazon.com/amazon-linux-ami/" + exit 1 + fi + ;; + + freebsd) + # FreeBSD versions lower than 11 are EOL + if [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then + echoerror "Versions lower than FreeBSD 11 are EOL and no longer supported." + exit 1 + fi + ;; + + *) + ;; + esac +} + + +__gather_system_info + +echo +echoinfo "System Information:" +echoinfo " CPU: ${CPU_VENDOR_ID}" +echoinfo " CPU Arch: ${CPU_ARCH}" +echoinfo " OS Name: ${OS_NAME}" +echoinfo " OS Version: ${OS_VERSION}" +echoinfo " Distribution: ${DISTRO_NAME} ${DISTRO_VERSION}" +echo + +# Simplify distro name naming on functions +DISTRO_NAME_L=$(echo "$DISTRO_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-zA-Z0-9_ ]//g' | sed -Ee 's/([[:space:]])+/_/g' | sed -Ee 's/tumbleweed//' ) + +# Simplify version naming on functions +if [ "$DISTRO_VERSION" = "" ] || [ ${_SIMPLIFY_VERSION} -eq $BS_FALSE ]; then + DISTRO_MAJOR_VERSION="" + DISTRO_MINOR_VERSION="" + PREFIXED_DISTRO_MAJOR_VERSION="" + PREFIXED_DISTRO_MINOR_VERSION="" +else + DISTRO_MAJOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') + DISTRO_MINOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).\([0-9]*\).*/\2/g') + PREFIXED_DISTRO_MAJOR_VERSION="_${DISTRO_MAJOR_VERSION}" + if [ "${PREFIXED_DISTRO_MAJOR_VERSION}" = "_" ]; then + PREFIXED_DISTRO_MAJOR_VERSION="" + fi + PREFIXED_DISTRO_MINOR_VERSION="_${DISTRO_MINOR_VERSION}" + if [ "${PREFIXED_DISTRO_MINOR_VERSION}" = "_" ]; then + 
PREFIXED_DISTRO_MINOR_VERSION="" + fi +fi + +# For Ubuntu derivatives, pretend to be their Ubuntu base version +__ubuntu_derivatives_translation + +# For Debian derivates, pretend to be their Debian base version +__debian_derivatives_translation + +# Fail soon for end of life versions +__check_end_of_life_versions + +echodebug "Binaries will be searched using the following \$PATH: ${PATH}" + +# Let users know that we'll use a proxy +if [ "${_HTTP_PROXY}" != "" ]; then + echoinfo "Using http proxy $_HTTP_PROXY" +fi + +# Let users know what's going to be installed/configured +if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing minion" + else + echoinfo "Configuring minion" + fi +fi + +if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing master" + else + echoinfo "Configuring master" + fi +fi + +if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing syndic" + else + echoinfo "Configuring syndic" + fi +fi + +if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Installing salt-cloud and required python-libcloud package" +fi + +if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echoinfo "Daemons will not be started" +fi + +if [ "${DISTRO_NAME_L}" = "ubuntu" ]; then + # For ubuntu versions, obtain the codename from the release version + __ubuntu_codename_translation +elif [ "${DISTRO_NAME_L}" = "debian" ]; then + # For debian versions, obtain the codename from the release version + __debian_codename_translation +fi + +if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|gentoo|red_hat|oracle|scientific|amazon|fedora|macosx)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then + echoerror "${DISTRO_NAME} does not have major version pegged packages support" + exit 1 +fi + +# Only RedHat based distros have testing support +if [ 
"${ITYPE}" = "testing" ]; then + if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle)')" = "" ]; then + echoerror "${DISTRO_NAME} does not have testing packages support" + exit 1 + fi + _EPEL_REPO="epel-testing" +fi + +# Only Ubuntu has support for installing to virtualenvs +if [ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$_VIRTUALENV_DIR" != "null" ]; then + echoerror "${DISTRO_NAME} does not have -V support" + exit 1 +fi + +# Only Ubuntu has support for pip installing all packages +if [ "${DISTRO_NAME_L}" != "ubuntu" ] && [ $_PIP_ALL -eq $BS_TRUE ]; then + echoerror "${DISTRO_NAME} does not have -a support" + exit 1 +fi + +if [ "$ITYPE" = "git" ]; then + + if [ "${GIT_REV}" = "master" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="MATCH" + else + case ${OS_NAME_L} in + openbsd|freebsd|netbsd|darwin ) + __NEW_VS_TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?3[0-9]{3}(\.[0-9]{1,2})?).*$/MATCH/') + if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" + if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then + # We do this to properly clone tags + GIT_REV="v${GIT_REV}" + fi + echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" + else + __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?[0-9]{1,4}\.[0-9]{1,2})(\.[0-9]{1,2})?.*$/MATCH/') + echodebug "Pre Neon Tag Regex Match On: ${GIT_REV}" + fi + ;; + * ) + __NEW_VS_TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?3[[:digit:]]\{3\}\(\.[[:digit:]]\{1,2\}\)\?\).*$/MATCH/') + if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then + _POST_NEON_INSTALL=$BS_TRUE + __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" + if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then + # We do this to properly clone tags + GIT_REV="v${GIT_REV}" + fi + echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" + else + __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 
's/^.*\(v\?[[:digit:]]\{1,4\}\.[[:digit:]]\{1,2\}\)\(\.[[:digit:]]\{1,2\}\)\?.*$/MATCH/') + echodebug "Pre Neon Tag Regex Match On: ${GIT_REV}" + fi + ;; + esac + fi + + if [ "$_POST_NEON_INSTALL" -eq $BS_TRUE ]; then + echo + echowarn "Post Neon git based installations will always install salt" + echowarn "and its dependencies using pip which will be upgraded to" + echowarn "at least v${_MINIMUM_PIP_VERSION}, and, in case the setuptools version is also" + echowarn "too old, it will be upgraded to at least v${_MINIMUM_SETUPTOOLS_VERSION}" + echo + echowarn "You have 10 seconds to cancel and stop the bootstrap process..." + echo + sleep 10 + _PIP_ALLOWED=$BS_TRUE + fi +fi + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __function_defined +# DESCRIPTION: Checks if a function is defined within this scripts scope +# PARAMETERS: function name +# RETURNS: 0 or 1 as in defined or not defined +#---------------------------------------------------------------------------------------------------------------------- +__function_defined() { + FUNC_NAME=$1 + if [ "$(command -v "$FUNC_NAME")" != "" ]; then + echoinfo "Found function $FUNC_NAME" + return 0 + fi + echodebug "$FUNC_NAME not found...." + return 1 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __wait_for_apt +# DESCRIPTION: Check if any apt, apt-get, aptitude, or dpkg processes are running before +# calling these again. This is useful when these process calls are part of +# a boot process, such as on AWS AMIs. This func will wait until the boot +# process is finished so the script doesn't exit on a locked proc. 
#----------------------------------------------------------------------------------------------------------------------
__wait_for_apt(){
    # Timeout set at 15 minutes
    WAIT_TIMEOUT=900

    # Run our passed in apt command
    "${@}" 2>"$APT_ERR"
    APT_RETURN=$?

    # Make sure we're not waiting on a lock
    while [ $APT_RETURN -ne 0 ] && grep -q '^E: Could not get lock' "$APT_ERR"; do
        echoinfo "Aware of the lock. Patiently waiting $WAIT_TIMEOUT more seconds..."
        sleep 1
        WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1))

        if [ "$WAIT_TIMEOUT" -eq 0 ]; then
            echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long."
            echoerror "Bootstrap script cannot proceed. Aborting."
            return 1
        else
            # Lock released (or different failure); retry the command
            "${@}" 2>"$APT_ERR"
            APT_RETURN=$?
        fi
    done

    return $APT_RETURN
}

#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __apt_get_install_noinput
#   DESCRIPTION:  (DRY) apt-get install with noinput options
#    PARAMETERS:  packages
#----------------------------------------------------------------------------------------------------------------------
__apt_get_install_noinput() {
    __wait_for_apt apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $?
}   # ----------  end of function __apt_get_install_noinput  ----------


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __apt_get_upgrade_noinput
#   DESCRIPTION:  (DRY) apt-get upgrade with noinput options
#----------------------------------------------------------------------------------------------------------------------
__apt_get_upgrade_noinput() {
    __wait_for_apt apt-get upgrade -y -o DPkg::Options::=--force-confold; return $?
}   # ----------  end of function __apt_get_upgrade_noinput  ----------


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __temp_gpg_pub
#   DESCRIPTION:  Create a temporary file for downloading a GPG public key.
#----------------------------------------------------------------------------------------------------------------------
__temp_gpg_pub() {
    if __check_command_exists mktemp; then
        tempfile="$(mktemp /tmp/salt-gpg-XXXXXXXX.pub 2>/dev/null)"

        if [ -z "$tempfile" ]; then
            echoerror "Failed to create temporary file in /tmp"
            return 1
        fi
    else
        # No mktemp available; fall back to a PID-based name
        tempfile="/tmp/salt-gpg-$$.pub"
    fi

    echo $tempfile
}   # ----------- end of function __temp_gpg_pub -----------


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __apt_key_fetch
#   DESCRIPTION:  Download and import GPG public key for "apt-secure"
#    PARAMETERS:  url
#----------------------------------------------------------------------------------------------------------------------
__apt_key_fetch() {
    url=$1

    tempfile="$(__temp_gpg_pub)"

    __fetch_url "$tempfile" "$url" || return 1
    cp -f "$tempfile" /usr/share/keyrings/salt-archive-keyring.gpg && chmod 644 /usr/share/keyrings/salt-archive-keyring.gpg || return 1
    rm -f "$tempfile"

    return 0
}   # ----------  end of function __apt_key_fetch  ----------


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __rpm_import_gpg
#   DESCRIPTION:  Download and import GPG public key to rpm database
#    PARAMETERS:  url
#----------------------------------------------------------------------------------------------------------------------
__rpm_import_gpg() {
    url=$1

    tempfile="$(__temp_gpg_pub)"

    __fetch_url "$tempfile" "$url" || return 1

    # At least on CentOS 8, a missing newline at the end causes:
    #   error: /tmp/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key.
    # shellcheck disable=SC1003,SC2086
    sed -i -e '$a\' $tempfile

    rpm --import "$tempfile" || return 1
    rm -f "$tempfile"

    return 0
}   # ----------  end of function __rpm_import_gpg  ----------


#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __yum_install_noinput
#   DESCRIPTION:  (DRY) yum install with noinput options
#----------------------------------------------------------------------------------------------------------------------
__yum_install_noinput() {

    ENABLE_EPEL_CMD=""
    # Skip Amazon Linux for the first round, since EPEL is no longer required.
    # See issue #724
    if [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DISTRO_NAME_L" != "amazon_linux_ami" ]; then
        ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}"
    fi

    if [ "$DISTRO_NAME_L" = "oracle_linux" ]; then
        # We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!!
        for package in "${@}"; do
            yum -y install "${package}" || yum -y install "${package}" ${ENABLE_EPEL_CMD} || return $?
        done
    else
        yum -y install "${@}" ${ENABLE_EPEL_CMD} || return $?
    fi
}   # ----------  end of function __yum_install_noinput  ----------

#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __dnf_install_noinput
#   DESCRIPTION:  (DRY) dnf install with noinput options
#----------------------------------------------------------------------------------------------------------------------
__dnf_install_noinput() {

    dnf -y install "${@}" || return $?
}   # ----------  end of function __dnf_install_noinput  ----------

#---  FUNCTION  -------------------------------------------------------------------------------------------------------
#          NAME:  __git_clone_and_checkout
#   DESCRIPTION:  (DRY) Helper function to clone and checkout salt to a
#                 specific revision.
+#---------------------------------------------------------------------------------------------------------------------- +__git_clone_and_checkout() { + + echodebug "Installed git version: $(git --version | awk '{ print $3 }')" + # Turn off SSL verification if -I flag was set for insecure downloads + if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then + export GIT_SSL_NO_VERIFY=1 + fi + + __SALT_GIT_CHECKOUT_PARENT_DIR=$(dirname "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null) + __SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-/tmp/git}" + __SALT_CHECKOUT_REPONAME="$(basename "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)" + __SALT_CHECKOUT_REPONAME="${__SALT_CHECKOUT_REPONAME:-salt}" + [ -d "${__SALT_GIT_CHECKOUT_PARENT_DIR}" ] || mkdir "${__SALT_GIT_CHECKOUT_PARENT_DIR}" + # shellcheck disable=SC2164 + cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" + if [ -d "${_SALT_GIT_CHECKOUT_DIR}" ]; then + echodebug "Found a checked out Salt repository" + # shellcheck disable=SC2164 + cd "${_SALT_GIT_CHECKOUT_DIR}" + echodebug "Fetching git changes" + git fetch || return 1 + # Tags are needed because of salt's versioning, also fetch that + echodebug "Fetching git tags" + git fetch --tags || return 1 + + # If we have the SaltStack remote set as upstream, we also need to fetch the tags from there + if [ "$(git remote -v | grep $_SALTSTACK_REPO_URL)" != "" ]; then + echodebug "Fetching upstream(SaltStack's Salt repository) git tags" + git fetch --tags upstream + else + echoinfo "Adding SaltStack's Salt repository as a remote" + git remote add upstream "$_SALTSTACK_REPO_URL" + echodebug "Fetching upstream(SaltStack's Salt repository) git tags" + git fetch --tags upstream + fi + + echodebug "Hard reseting the cloned repository to ${GIT_REV}" + git reset --hard "$GIT_REV" || return 1 + + # Just calling `git reset --hard $GIT_REV` on a branch name that has + # already been checked out will not update that branch to the upstream + # HEAD; instead it will simply reset to itself. 
Check the ref to see + # if it is a branch name, check out the branch, and pull in the + # changes. + if git branch -a | grep -q "${GIT_REV}"; then + echodebug "Rebasing the cloned repository branch" + git pull --rebase || return 1 + fi + else + if [ "$_FORCE_SHALLOW_CLONE" -eq "${BS_TRUE}" ]; then + echoinfo "Forced shallow cloning of git repository." + __SHALLOW_CLONE=$BS_TRUE + elif [ "$__TAG_REGEX_MATCH" = "MATCH" ]; then + echoinfo "Git revision matches a Salt version tag, shallow cloning enabled." + __SHALLOW_CLONE=$BS_TRUE + else + echowarn "The git revision being installed does not match a Salt version tag. Shallow cloning disabled" + __SHALLOW_CLONE=$BS_FALSE + fi + + if [ "$__SHALLOW_CLONE" -eq $BS_TRUE ]; then + # Let's try shallow cloning to speed up. + # Test for "--single-branch" option introduced in git 1.7.10, the minimal version of git where the shallow + # cloning we need actually works + if [ "$(git clone 2>&1 | grep 'single-branch')" != "" ]; then + # The "--single-branch" option is supported, attempt shallow cloning + echoinfo "Attempting to shallow clone $GIT_REV from Salt's repository ${_SALT_REPO_URL}" + if git clone --depth 1 --branch "$GIT_REV" "$_SALT_REPO_URL" "$__SALT_CHECKOUT_REPONAME"; then + # shellcheck disable=SC2164 + cd "${_SALT_GIT_CHECKOUT_DIR}" + __SHALLOW_CLONE=$BS_TRUE + else + # Shallow clone above failed(missing upstream tags???), let's resume the old behaviour. + echowarn "Failed to shallow clone." + echoinfo "Resuming regular git clone and remote SaltStack repository addition procedure" + __SHALLOW_CLONE=$BS_FALSE + fi + else + echodebug "Shallow cloning not possible. Required git version not met." + __SHALLOW_CLONE=$BS_FALSE + fi + fi + + if [ "$__SHALLOW_CLONE" -eq $BS_FALSE ]; then + git clone "$_SALT_REPO_URL" "$__SALT_CHECKOUT_REPONAME" || return 1 + # shellcheck disable=SC2164 + cd "${_SALT_GIT_CHECKOUT_DIR}" + + if ! 
echo "$_SALT_REPO_URL" | grep -q -F -w "${_SALTSTACK_REPO_URL#*://}"; then + # We need to add the saltstack repository as a remote and fetch tags for proper versioning + echoinfo "Adding SaltStack's Salt repository as a remote" + git remote add upstream "$_SALTSTACK_REPO_URL" || return 1 + + echodebug "Fetching upstream (SaltStack's Salt repository) git tags" + git fetch --tags upstream || return 1 + + # Check if GIT_REV is a remote branch or just a commit hash + if git branch -r | grep -q -F -w "origin/$GIT_REV"; then + GIT_REV="origin/$GIT_REV" + fi + fi + + echodebug "Checking out $GIT_REV" + git checkout "$GIT_REV" || return 1 + fi + + fi + + echoinfo "Cloning Salt's git repository succeeded" + return 0 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __copyfile +# DESCRIPTION: Simple function to copy files. Overrides if asked. +#---------------------------------------------------------------------------------------------------------------------- +__copyfile() { + overwrite=$_FORCE_OVERWRITE + if [ $# -eq 2 ]; then + sfile=$1 + dfile=$2 + elif [ $# -eq 3 ]; then + sfile=$1 + dfile=$2 + overwrite=$3 + else + echoerror "Wrong number of arguments for __copyfile()" + echoinfo "USAGE: __copyfile OR __copyfile " + exit 1 + fi + + # Does the source file exist? + if [ ! -f "$sfile" ]; then + echowarn "$sfile does not exist!" + return 1 + fi + + # If the destination is a directory, let's make it a full path so the logic + # below works as expected + if [ -d "$dfile" ]; then + echodebug "The passed destination ($dfile) is a directory" + dfile="${dfile}/$(basename "$sfile")" + echodebug "Full destination path is now: $dfile" + fi + + if [ ! 
-f "$dfile" ]; then + # The destination file does not exist, copy + echodebug "Copying $sfile to $dfile" + cp "$sfile" "$dfile" || return 1 + elif [ -f "$dfile" ] && [ "$overwrite" -eq $BS_TRUE ]; then + # The destination exist and we're overwriting + echodebug "Overwriting $dfile with $sfile" + cp -f "$sfile" "$dfile" || return 1 + elif [ -f "$dfile" ] && [ "$overwrite" -ne $BS_TRUE ]; then + echodebug "Not overwriting $dfile with $sfile" + fi + return 0 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __movefile +# DESCRIPTION: Simple function to move files. Overrides if asked. +#---------------------------------------------------------------------------------------------------------------------- +__movefile() { + overwrite=$_FORCE_OVERWRITE + if [ $# -eq 2 ]; then + sfile=$1 + dfile=$2 + elif [ $# -eq 3 ]; then + sfile=$1 + dfile=$2 + overwrite=$3 + else + echoerror "Wrong number of arguments for __movefile()" + echoinfo "USAGE: __movefile OR __movefile " + exit 1 + fi + + if [ $_KEEP_TEMP_FILES -eq $BS_TRUE ]; then + # We're being told not to move files, instead copy them so we can keep + # them around + echodebug "Since BS_KEEP_TEMP_FILES=1 we're copying files instead of moving them" + __copyfile "$sfile" "$dfile" "$overwrite" + return $? + fi + + # Does the source file exist? + if [ ! -f "$sfile" ]; then + echowarn "$sfile does not exist!" + return 1 + fi + + # If the destination is a directory, let's make it a full path so the logic + # below works as expected + if [ -d "$dfile" ]; then + echodebug "The passed destination($dfile) is a directory" + dfile="${dfile}/$(basename "$sfile")" + echodebug "Full destination path is now: $dfile" + fi + + if [ ! 
-f "$dfile" ]; then + # The destination file does not exist, move + echodebug "Moving $sfile to $dfile" + mv "$sfile" "$dfile" || return 1 + elif [ -f "$dfile" ] && [ "$overwrite" -eq $BS_TRUE ]; then + # The destination exist and we're overwriting + echodebug "Overriding $dfile with $sfile" + mv -f "$sfile" "$dfile" || return 1 + elif [ -f "$dfile" ] && [ "$overwrite" -ne $BS_TRUE ]; then + echodebug "Not overriding $dfile with $sfile" + fi + + return 0 +} + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __linkfile +# DESCRIPTION: Simple function to create symlinks. Overrides if asked. Accepts globs. +#---------------------------------------------------------------------------------------------------------------------- +__linkfile() { + overwrite=$_FORCE_OVERWRITE + if [ $# -eq 2 ]; then + target=$1 + linkname=$2 + elif [ $# -eq 3 ]; then + target=$1 + linkname=$2 + overwrite=$3 + else + echoerror "Wrong number of arguments for __linkfile()" + echoinfo "USAGE: __linkfile OR __linkfile " + exit 1 + fi + + for sfile in $target; do + # Does the source file exist? + if [ ! -f "$sfile" ]; then + echowarn "$sfile does not exist!" + return 1 + fi + + # If the destination is a directory, let's make it a full path so the logic + # below works as expected + if [ -d "$linkname" ]; then + echodebug "The passed link name ($linkname) is a directory" + linkname="${linkname}/$(basename "$sfile")" + echodebug "Full destination path is now: $linkname" + fi + + if [ ! 
-e "$linkname" ]; then + # The destination file does not exist, create link + echodebug "Creating $linkname symlink pointing to $sfile" + ln -s "$sfile" "$linkname" || return 1 + elif [ -e "$linkname" ] && [ "$overwrite" -eq $BS_TRUE ]; then + # The destination exist and we're overwriting + echodebug "Overwriting $linkname symlink to point on $sfile" + ln -sf "$sfile" "$linkname" || return 1 + elif [ -e "$linkname" ] && [ "$overwrite" -ne $BS_TRUE ]; then + echodebug "Not overwriting $linkname symlink to point on $sfile" + fi + done + + return 0 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __overwriteconfig() +# DESCRIPTION: Simple function to overwrite master or minion config files. +#---------------------------------------------------------------------------------------------------------------------- +__overwriteconfig() { + if [ $# -eq 2 ]; then + target=$1 + json=$2 + else + echoerror "Wrong number of arguments for __convert_json_to_yaml_str()" + echoinfo "USAGE: __convert_json_to_yaml_str " + exit 1 + fi + + # Make a tempfile to dump any python errors into. + if __check_command_exists mktemp; then + tempfile="$(mktemp /tmp/salt-config-XXXXXXXX 2>/dev/null)" + + if [ -z "$tempfile" ]; then + echoerror "Failed to create temporary file in /tmp" + return 1 + fi + else + tempfile="/tmp/salt-config-$$" + fi + + if [ -n "$_PY_EXE" ]; then + good_python="$_PY_EXE" + # If python does not have yaml installed we're on Arch and should use python2 + elif python -c "import yaml" 2> /dev/null; then + good_python=python + else + good_python=python2 + fi + + # Convert json string to a yaml string and write it to config file. Output is dumped into tempfile. 
+ "$good_python" -c "import json; import yaml; jsn=json.loads('$json'); yml=yaml.safe_dump(jsn, line_break='\\n', default_flow_style=False); config_file=open('$target', 'w'); config_file.write(yml); config_file.close();" 2>$tempfile + + # No python errors output to the tempfile + if [ ! -s "$tempfile" ]; then + rm -f "$tempfile" + return 0 + fi + + # Errors are present in the tempfile - let's expose them to the user. + fullerror=$(cat "$tempfile") + echodebug "$fullerror" + echoerror "Python error encountered. This is likely due to passing in a malformed JSON string. Please use -D to see stacktrace." + + rm -f "$tempfile" + + return 1 + +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_systemd +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_systemd() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" 
+ exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + if [ "$(systemctl is-enabled "${servicename}")" = "enabled" ]; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_systemd ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_upstart +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_upstart() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" + exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # Check if service is enabled to start at boot + if initctl list | grep "${servicename}" > /dev/null 2>&1; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_upstart ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_sysvinit +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_sysvinit() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" 
+ exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + if [ "$(LC_ALL=C /sbin/chkconfig --list | grep "\\<${servicename}\\>" | grep '[2-5]:on')" != "" ]; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_sysvinit ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_debian +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_debian() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" 
+ exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # Check if the service is going to be started at any runlevel, fixes bootstrap in container (Docker, LXC) + if ls /etc/rc?.d/S*"${servicename}" >/dev/null 2>&1; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_debian ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_openbsd +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_openbsd() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" 
+ exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # shellcheck disable=SC2086,SC2046,SC2144 + if rcctl get ${servicename} status; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_openbsd ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_services_openrc +# DESCRIPTION: Return 0 or 1 in case the service is enabled or not +# PARAMETERS: servicename +#---------------------------------------------------------------------------------------------------------------------- +__check_services_openrc() { + if [ $# -eq 0 ]; then + echoerror "You need to pass a service name to check!" + exit 1 + elif [ $# -ne 1 ]; then + echoerror "You need to pass a service name to check as the single argument to the function" + fi + + servicename=$1 + echodebug "Checking if service ${servicename} is enabled" + + # shellcheck disable=SC2086,SC2046,SC2144 + if rc-status $(rc-status -r) | tail -n +2 | grep -q "\\<$servicename\\>"; then + echodebug "Service ${servicename} is enabled" + return 0 + else + echodebug "Service ${servicename} is NOT enabled" + return 1 + fi +} # ---------- end of function __check_services_openrc ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __create_virtualenv +# DESCRIPTION: Return 0 or 1 depending on successful creation of virtualenv +#---------------------------------------------------------------------------------------------------------------------- +__create_virtualenv() { + if [ ! 
-d "$_VIRTUALENV_DIR" ]; then + echoinfo "Creating virtualenv ${_VIRTUALENV_DIR}" + if [ $_PIP_ALL -eq $BS_TRUE ]; then + virtualenv --no-site-packages "${_VIRTUALENV_DIR}" || return 1 + else + virtualenv --system-site-packages "${_VIRTUALENV_DIR}" || return 1 + fi + fi + return 0 +} # ---------- end of function __create_virtualenv ---------- + + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __activate_virtualenv +# DESCRIPTION: Return 0 or 1 depending on successful activation of virtualenv +#---------------------------------------------------------------------------------------------------------------------- +__activate_virtualenv() { + set +o nounset + # Is virtualenv empty + if [ -z "$_VIRTUALENV_DIR" ]; then + __create_virtualenv || return 1 + # shellcheck source=/dev/null + . "${_VIRTUALENV_DIR}/bin/activate" || return 1 + echoinfo "Activated virtualenv ${_VIRTUALENV_DIR}" + fi + set -o nounset + return 0 +} # ---------- end of function __activate_virtualenv ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_pip_pkgs +# DESCRIPTION: Return 0 or 1 if successfully able to install pip packages. Can provide a different python version to +# install pip packages with. If $py_ver is not specified it will use the default python version. +# PARAMETERS: pkgs, py_ver +#---------------------------------------------------------------------------------------------------------------------- + +__install_pip_pkgs() { + _pip_pkgs="$1" + _py_exe="$2" + _py_pkg=$(echo "$_py_exe" | sed -E "s/\\.//g") + _pip_cmd="${_py_exe} -m pip" + + if [ "${_py_exe}" = "" ]; then + _py_exe='python' + fi + + __check_pip_allowed + + # Install pip and pip dependencies + if ! 
__check_command_exists "${_pip_cmd} --version"; then + __PACKAGES="${_py_pkg}-setuptools ${_py_pkg}-pip gcc" + # shellcheck disable=SC2086 + if [ "$DISTRO_NAME_L" = "debian" ] || [ "$DISTRO_NAME_L" = "ubuntu" ];then + __PACKAGES="${__PACKAGES} ${_py_pkg}-dev" + __apt_get_install_noinput ${__PACKAGES} || return 1 + else + __PACKAGES="${__PACKAGES} ${_py_pkg}-devel" + if [ "$DISTRO_NAME_L" = "fedora" ];then + __dnf_install_noinput ${__PACKAGES} || return 1 + else + __yum_install_noinput ${__PACKAGES} || return 1 + fi + fi + + fi + + echoinfo "Installing pip packages: ${_pip_pkgs} using ${_py_exe}" + # shellcheck disable=SC2086 + ${_pip_cmd} install ${_pip_pkgs} || return 1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_tornado_pip +# PARAMETERS: python executable +# DESCRIPTION: Return 0 or 1 if successfully able to install tornado<5.0 +#---------------------------------------------------------------------------------------------------------------------- +__install_tornado_pip() { + # OS needs tornado <5.0 from pip + __check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt on Python 3" + ## install pip if its not installed and install tornado + __install_pip_pkgs "tornado<5.0" "${1}" || return 1 +} + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_pip_deps +# DESCRIPTION: Return 0 or 1 if successfully able to install pip packages via requirements file +# PARAMETERS: requirements_file +#---------------------------------------------------------------------------------------------------------------------- +__install_pip_deps() { + # Install virtualenv to system pip before activating virtualenv if thats going to be used + # We assume pip pkg is installed since that is distro specific + if [ "$_VIRTUALENV_DIR" != "null" ]; then + if ! 
__check_command_exists pip; then + echoerror "Pip not installed: required for -a installs" + exit 1 + fi + pip install -U virtualenv + __activate_virtualenv || return 1 + else + echoerror "Must have virtualenv dir specified for -a installs" + fi + + requirements_file=$1 + if [ ! -f "${requirements_file}" ]; then + echoerror "Requirements file: ${requirements_file} cannot be found, needed for -a (pip pkg) installs" + exit 1 + fi + + __PIP_PACKAGES='' + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # shellcheck disable=SC2089 + __PIP_PACKAGES="${__PIP_PACKAGES} 'apache-libcloud>=$_LIBCLOUD_MIN_VERSION'" + fi + + # shellcheck disable=SC2086,SC2090 + pip install -U -r ${requirements_file} ${__PIP_PACKAGES} +} # ---------- end of function __install_pip_deps ---------- + +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __install_salt_from_repo_post_neon +# DESCRIPTION: Return 0 or 1 if successfully able to install. Can provide a different python version to +# install pip packages with. If $py_exe is not specified it will use the default python version. +# PARAMETERS: py_exe +#---------------------------------------------------------------------------------------------------------------------- +__install_salt_from_repo_post_neon() { + _py_exe="$1" + + if [ "${_py_exe}" = "" ]; then + _py_exe='python' + fi + + echodebug "__install_salt_from_repo_post_neon py_exe=$_py_exe" + + _py_version=$(${_py_exe} -c "import sys; print('{0}.{1}'.format(*sys.version_info))") + _pip_cmd="pip${_py_version}" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip$(echo "${_py_version}" | cut -c -1)" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip" + if ! 
__check_command_exists "${_pip_cmd}"; then + echoerror "Unable to find a pip binary" + return 1 + fi + fi + fi + + __check_pip_allowed + + echodebug "Installed pip version: $(${_pip_cmd} --version)" + + CHECK_PIP_VERSION_SCRIPT=$(cat << EOM +import sys +try: + import pip + installed_pip_version=tuple([int(part.strip()) for part in pip.__version__.split('.') if part.isdigit()]) + desired_pip_version=($(echo ${_MINIMUM_PIP_VERSION} | sed 's/\./, /g' )) + if installed_pip_version < desired_pip_version: + print('Desired pip version {!r} > Installed pip version {!r}'.format('.'.join(map(str, desired_pip_version)), '.'.join(map(str, installed_pip_version)))) + sys.exit(1) + print('Desired pip version {!r} < Installed pip version {!r}'.format('.'.join(map(str, desired_pip_version)), '.'.join(map(str, installed_pip_version)))) + sys.exit(0) +except ImportError: + print('Failed to import pip') + sys.exit(1) +EOM +) + if ! ${_py_exe} -c "$CHECK_PIP_VERSION_SCRIPT"; then + # Upgrade pip to at least 1.2 which is when we can start using "python -m pip" + if [ "${_py_version}" = "3.5" ]; then + echodebug "Running '${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} pip>=${_MINIMUM_PIP_VERSION},<21.0'" + ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} -v "pip>=${_MINIMUM_PIP_VERSION},<21.0" + else + echodebug "Running '${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} pip>=${_MINIMUM_PIP_VERSION}'" + ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} -v "pip>=${_MINIMUM_PIP_VERSION}" + fi + sleep 1 + echodebug "PATH: ${PATH}" + _pip_cmd="pip${_py_version}" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip$(echo "${_py_version}" | cut -c -1)" + if ! __check_command_exists "${_pip_cmd}"; then + echodebug "The pip binary '${_pip_cmd}' was not found in PATH" + _pip_cmd="pip" + if ! 
__check_command_exists "${_pip_cmd}"; then + echoerror "Unable to find a pip binary" + return 1 + fi + fi + fi + echodebug "Installed pip version: $(${_pip_cmd} --version)" + fi + + _setuptools_dep="setuptools>=${_MINIMUM_SETUPTOOLS_VERSION}" + if [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + # We also lock setuptools to <45 which is the latest release to support both py2 and py3 + _setuptools_dep="${_setuptools_dep},<45" + fi + + echodebug "Running '${_pip_cmd} install wheel ${_setuptools_dep}'" + ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} wheel "${_setuptools_dep}" + + echoinfo "Installing salt using ${_py_exe}" + cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 + + mkdir /tmp/git/deps + echoinfo "Downloading Salt Dependencies from PyPi" + echodebug "Running '${_pip_cmd} download -d /tmp/git/deps .'" + ${_pip_cmd} download -d /tmp/git/deps . || (echo "Failed to download salt dependencies" && return 1) + + echoinfo "Installing Downloaded Salt Dependencies" + echodebug "Running '${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/*'" + ${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/* || return 1 + rm -f /tmp/git/deps/* + + echoinfo "Building Salt Python Wheel" + + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + SETUP_PY_INSTALL_ARGS="-v" + fi + + echodebug "Running '${_py_exe} setup.py --salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} bdist_wheel'" + ${_py_exe} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} bdist_wheel || return 1 + mv dist/salt*.whl /tmp/git/deps/ || return 1 + + cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" || return 1 + + echoinfo "Installing Built Salt Wheel" + ${_pip_cmd} uninstall --yes salt 2>/dev/null || true + echodebug "Running '${_pip_cmd} install --no-deps --force-reinstall ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/salt*.whl'" + ${_pip_cmd} install --no-deps --force-reinstall \ + 
${_POST_NEON_PIP_INSTALL_ARGS} \ + --global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \ + /tmp/git/deps/salt*.whl || return 1 + + echoinfo "Checking if Salt can be imported using ${_py_exe}" + CHECK_SALT_SCRIPT=$(cat << EOM +import os +import sys +try: + import salt + import salt.version + print('\nInstalled Salt Version: {}'.format(salt.version.__version__)) + print('Installed Salt Package Path: {}\n'.format(os.path.dirname(salt.__file__))) + sys.exit(0) +except ImportError: + print('\nFailed to import salt\n') + sys.exit(1) +EOM +) + if ! ${_py_exe} -c "$CHECK_SALT_SCRIPT"; then + return 1 + fi + return 0 +} # ---------- end of function __install_salt_from_repo_post_neon ---------- + + +if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + if [ "x${_PY_MAJOR_VERSION}" = "x" ]; then + # Default to python 2 for pre Neon installs + _PY_MAJOR_VERSION=2 + fi +else + if [ "x${_PY_MAJOR_VERSION}" = "x" ]; then + # Default to python 3 for post Neon install + _PY_MAJOR_VERSION=3 + fi +fi + +####################################################################################################################### +# +# Distribution install functions +# +# In order to install salt for a distribution you need to define: +# +# To Install Dependencies, which is required, one of: +# 1. install____deps +# 2. install_____deps +# 3. install___deps +# 4 install____deps +# 5. install___deps +# 6. install__deps +# +# Optionally, define a salt configuration function, which will be called if +# the -c (config-dir) option is passed. One of: +# 1. config____salt +# 2. config_____salt +# 3. config___salt +# 4 config____salt +# 5. config___salt +# 6. config__salt +# 7. config_salt [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# Optionally, define a salt master pre-seed function, which will be called if +# the -k (pre-seed master keys) option is passed. One of: +# 1. preseed____master +# 2. preseed_____master +# 3. 
preseed___master +# 4 preseed____master +# 5. preseed___master +# 6. preseed__master +# 7. preseed_master [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# To install salt, which, of course, is required, one of: +# 1. install___ +# 2. install____ +# 3. install__ +# +# Optionally, define a post install function, one of: +# 1. install____post +# 2. install_____post +# 3. install___post +# 4 install____post +# 5. install___post +# 6. install__post +# +# Optionally, define a start daemons function, one of: +# 1. install____restart_daemons +# 2. install_____restart_daemons +# 3. install___restart_daemons +# 4 install____restart_daemons +# 5. install___restart_daemons +# 6. install__restart_daemons +# +# NOTE: The start daemons function should be able to restart any daemons +# which are running, or start if they're not running. +# +# Optionally, define a daemons running function, one of: +# 1. daemons_running___ +# 2. daemons_running____ +# 3. daemons_running__ +# 4 daemons_running___ +# 5. daemons_running__ +# 6. daemons_running_ +# 7. daemons_running [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] +# +# Optionally, check enabled Services: +# 1. install____check_services +# 2. install_____check_services +# 3. install___check_services +# 4 install____check_services +# 5. install___check_services +# 6. 
install__check_services +# +####################################################################################################################### + + +####################################################################################################################### +# +# Ubuntu Install Functions +# +__enable_universe_repository() { + if [ "$(grep -R universe /etc/apt/sources.list /etc/apt/sources.list.d/ | grep -v '#')" != "" ]; then + # The universe repository is already enabled + return 0 + fi + + echodebug "Enabling the universe repository" + + add-apt-repository -y "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe" || return 1 + + return 0 +} + +__install_saltstack_ubuntu_repository() { + # Workaround for latest non-LTS Ubuntu + if { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 21 ] && [ "$DISTRO_MINOR_VERSION" -eq 04 ]; }; then + echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." 
+ UBUNTU_VERSION=20.04 + UBUNTU_CODENAME="focal" + else + UBUNTU_VERSION=${DISTRO_VERSION} + UBUNTU_CODENAME=${DISTRO_CODENAME} + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Ubuntu 18+ + if [ "$DISTRO_MAJOR_VERSION" -ge 18 ]; then + __PACKAGES="${__PACKAGES} gnupg" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # SaltStack's stable Ubuntu repository: + SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${STABLE_REV}" + echo "$__REPO_ARCH_DEB $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/salt.list + + __apt_key_fetch "$SALTSTACK_UBUNTU_URL/salt-archive-keyring.gpg" || return 1 + + __wait_for_apt apt-get update || return 1 +} + +install_ubuntu_deps() { + if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + # Install add-apt-repository + if ! 
__check_command_exists add-apt-repository; then + __apt_get_install_noinput software-properties-common || return 1 + fi + + __enable_universe_repository || return 1 + + __wait_for_apt apt-get update || return 1 + fi + + __PACKAGES='' + + if [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # Minimal systems might not have upstart installed, install it + __PACKAGES="upstart" + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 16 ] && [ -z "$_PY_EXE" ]; then + __PACKAGES="${__PACKAGES} python2.7" + fi + + if [ "$_VIRTUALENV_DIR" != "null" ]; then + __PACKAGES="${__PACKAGES} python-virtualenv" + fi + # Need python-apt for managing packages via Salt + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apt" + + # requests is still used by many salt modules + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-requests" + + # YAML module is used for generating custom master/minion configs + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" + + # Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813 + __PACKAGES="${__PACKAGES} procps pciutils" + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_ubuntu_stable_deps() { + if [ "${_SLEEP}" -eq "${__DEFAULT_SLEEP}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # The user did not pass a custom sleep value as an argument, let's increase the default value + echodebug "On Ubuntu systems we increase the default sleep value to 10." + echodebug "See https://github.com/saltstack/salt/issues/12248 for more info." 
+ _SLEEP=10 + fi + + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." + fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 + else + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_ubuntu_repository || return 1 + fi + + install_ubuntu_deps || return 1 +} + +install_ubuntu_git_deps() { + __wait_for_apt apt-get update || return 1 + + if ! __check_command_exists git; then + __apt_get_install_noinput git-core || return 1 + fi + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __apt_get_install_noinput ca-certificates + fi + + __git_clone_and_checkout || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + __PACKAGES="" + + # See how we are installing packages + if [ "${_PIP_ALL}" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-dev swig libssl-dev libzmq3 libzmq3-dev" + + if ! 
__check_command_exists pip; then + __PACKAGES="${__PACKAGES} python-setuptools python-pip" + fi + + # Get just the apt packages that are required to build all the pythons + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + # Install the pythons from requirements (only zmq for now) + __install_pip_deps "${_SALT_GIT_CHECKOUT_DIR}/requirements/zeromq.txt" || return 1 + else + install_ubuntu_stable_deps || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python3-setuptools" + else + # There is no m2crypto package for Py3 at this time - only install for Py2 + __PACKAGES="${__PACKAGES} python-m2crypto" + fi + + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-zmq" + __PACKAGES="${__PACKAGES} python-concurrent.futures" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Install python-libcloud if asked to + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + fi + else + __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_ubuntu_stable() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + 
fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_ubuntu_git() { + # Activate virtualenv before install + if [ "${_VIRTUALENV_DIR}" != "null" ]; then + __activate_virtualenv || return 1 + fi + + if [ -n "$_PY_EXE" ]; then + _PYEXE=${_PY_EXE} + else + _PYEXE=python2.7 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + # We can use --prefix on debian based ditributions + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python3/dist-packages --install-option=--install-scripts=/usr/bin" + else + _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python2.7/dist-packages --install-option=--install-scripts=/usr/bin" + fi + _POST_NEON_PIP_INSTALL_ARGS="" + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 + sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + # shellcheck disable=SC2086 + "${_PYEXE}" setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + else + # shellcheck disable=SC2086 + "${_PYEXE}" setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 + fi + + return 0 +} + +install_ubuntu_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + # Using systemd + /bin/systemctl 
is-enabled salt-$fname.service > /dev/null 2>&1 || ( + /bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && + /bin/systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + sleep 1 + /bin/systemctl daemon-reload + elif [ -f /etc/init.d/salt-$fname ]; then + update-rc.d salt-$fname defaults + fi + done + + return 0 +} + +install_ubuntu_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + elif [ -f /sbin/initctl ]; then + _upstart_conf="/etc/init/salt-$fname.conf" + # We have upstart support + echodebug "There's upstart support" + if [ ! 
-f $_upstart_conf ]; then + # upstart does not know about our service, let's copy the proper file + echowarn "Upstart does not appear to know about salt-$fname" + echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart to $_upstart_conf" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.upstart" "$_upstart_conf" + # Set service to know about virtualenv + if [ "${_VIRTUALENV_DIR}" != "null" ]; then + echo "SALT_USE_VIRTUALENV=${_VIRTUALENV_DIR}" > /etc/default/salt-${fname} + fi + /sbin/initctl reload-configuration || return 1 + fi + # No upstart support in Ubuntu!? + elif [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" ]; then + echodebug "There's NO upstart support!?" + echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init to /etc/init.d/salt-$fname" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" "/etc/init.d/salt-$fname" + chmod +x /etc/init.d/salt-$fname + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + update-rc.d salt-$fname defaults + else + echoerror "Neither upstart nor init.d was setup for salt-$fname" + fi + done + + return 0 +} + +install_ubuntu_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + # Ensure upstart configs / systemd units are loaded + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + systemctl daemon-reload + elif [ -f /sbin/initctl ]; then + /sbin/initctl reload-configuration + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" 
-ge 16 ]; then + echodebug "There's systemd support while checking salt-$fname" + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + # We failed to start the service, let's test the SysV code below + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + if [ -f /sbin/initctl ]; then + echodebug "There's upstart support while checking salt-$fname" + + if status salt-$fname 2>/dev/null | grep -q running; then + stop salt-$fname || (echodebug "Failed to stop salt-$fname" && return 1) + fi + + start salt-$fname && continue + # We failed to start the service, let's test the SysV code below + echodebug "Failed to start salt-$fname using Upstart" + fi + + if [ ! -f /etc/init.d/salt-$fname ]; then + echoerror "No init.d support for salt-$fname was found" + return 1 + fi + + /etc/init.d/salt-$fname stop > /dev/null 2>&1 + /etc/init.d/salt-$fname start + done + + return 0 +} + +install_ubuntu_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then + __check_services_systemd salt-$fname || return 1 + elif [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + __check_services_upstart salt-$fname || return 1 + elif [ -f /etc/init.d/salt-$fname ]; then + __check_services_debian salt-$fname || return 1 + fi + done + + return 0 +} +# +# End of Ubuntu Install Functions +# 
#######################################################################################################################

#######################################################################################################################
#
#   Debian Install Functions
#

# Configure the SaltStack apt repository (/etc/apt/sources.list.d/salt.list)
# for the detected Debian release and import the repository signing key.
__install_saltstack_debian_repository() {
    if [ "$DISTRO_MAJOR_VERSION" -eq 11 ]; then
        # Packages for Debian 11 at repo.saltproject.io are not yet available
        # Set up repository for Debian 10 for Debian 11 for now until support
        # is available at repo.saltproject.io for Debian 11.
        echowarn "Debian 11 distribution detected, but stable packages requested. Trying packages from Debian 10. You may experience problems."
        DEBIAN_RELEASE="10"
        DEBIAN_CODENAME="buster"
    else
        DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION"
        DEBIAN_CODENAME="$DISTRO_CODENAME"
    fi

    # Python 3 builds are published under "py3" instead of "apt"
    __PY_VERSION_REPO="apt"
    if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
        __PY_VERSION_REPO="py3"
    fi

    # Install downloader backend for GPG keys fetching
    __PACKAGES='wget'

    # Required as it is not installed by default on Debian 9+
    if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then
        __PACKAGES="${__PACKAGES} gnupg2"
    fi

    # Make sure https transport is available
    if [ "$HTTP_VAL" = "https" ] ; then
        __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates"
    fi

    # shellcheck disable=SC2086,SC2090
    __apt_get_install_noinput ${__PACKAGES} || return 1

    # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location
    SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${STABLE_REV}"
    echo "$__REPO_ARCH_DEB $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/salt.list"

    __apt_key_fetch "$SALTSTACK_DEBIAN_URL/salt-archive-keyring.gpg" || return 1

    __wait_for_apt apt-get update || return 1
}

# Install base dependencies on Debian: optionally upgrade the system, install
# procps/pciutils (needed for Docker bootstraps), PyYAML (used to generate
# custom master/minion configs), and configure the SaltStack repository unless
# repositories are disabled.
install_debian_deps() {
    if [ $_START_DAEMONS -eq $BS_FALSE ]; then
        echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour."
    fi

    # No user interaction, libc6 restart services for example
    export DEBIAN_FRONTEND=noninteractive

    __wait_for_apt apt-get update || return 1

    if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then
        # Try to update GPG keys first if allowed
        if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then
            if [ "$DISTRO_MAJOR_VERSION" -ge 10 ]; then
                __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1
            else
                __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring &&
                    apt-key update && apt-get update || return 1
            fi
        fi

        __apt_get_upgrade_noinput || return 1
    fi

    if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
        PY_PKG_VER=3
    else
        PY_PKG_VER=""
    fi

    # Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813
    __PACKAGES='procps pciutils'

    # YAML module is used for generating custom master/minion configs
    __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml"

    # shellcheck disable=SC2086
    __apt_get_install_noinput ${__PACKAGES} || return 1

    if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then
        __check_dpkg_architecture || return 1
        __install_saltstack_debian_repository || return 1
    fi

    if [ "${_EXTRA_PACKAGES}" != "" ]; then
        echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}"
        # shellcheck disable=SC2086
        __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1
    fi

    return 0
}

# Common preparation for git-based installs: ensure git (and CA certificates
# for https clones) are present, clone the Salt repository, and arrange for
# config_salt() to be triggered afterwards.
install_debian_git_pre() {
    if ! __check_command_exists git; then
        __apt_get_install_noinput git || return 1
    fi

    if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
        __apt_get_install_noinput ca-certificates
    fi

    __git_clone_and_checkout || return 1

    # Let's trigger config_salt()
    if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
        _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/"
        CONFIG_SALT_FUNC="config_salt"
    fi

    return 0
}

# Dependencies for installing Salt from git on Debian. Pre-Neon (Python 2 era)
# checkouts need the full distro package set; post-Neon checkouts only need a
# pip/compiler toolchain.
install_debian_git_deps() {
    install_debian_deps || return 1
    install_debian_git_pre || return 1

    if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
        PY_PKG_VER=3
    else
        PY_PKG_VER=""
    fi

    if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then
        __PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname"
        __PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-m2crypto"
        __PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq"

        if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
            # Install python-libcloud if asked to
            __PACKAGES="${__PACKAGES} python-libcloud"
        fi

        # shellcheck disable=SC2086
        __apt_get_install_noinput ${__PACKAGES} || return 1
    else
        __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc"
        echodebug "install_debian_git_deps() Installing ${__PACKAGES}"
        # shellcheck disable=SC2086
        __apt_get_install_noinput ${__PACKAGES} || return 1
    fi

    return 0
}

install_debian_7_git_deps() {
    install_debian_deps || return 1
    install_debian_git_deps || return 1

    return 0
}

# Debian 8 (Jessie) git-install dependencies. Pre-Neon installs may need
# tornado<5.0 from pip or the jessie-backports repository depending on pip
# availability and CPU architecture.
install_debian_8_git_deps() {

    if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
        echodebug "CALLING install_debian_git_deps"
        install_debian_git_deps || return 1
        return 0
    fi

    install_debian_deps || return 1

    if ! __check_command_exists git; then
        __apt_get_install_noinput git || return 1
    fi

    if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then
        __apt_get_install_noinput ca-certificates
    fi

    __git_clone_and_checkout || return 1

    __PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2"
    __PACKAGES="${__PACKAGES} python-m2crypto python-msgpack python-requests python-systemd"
    __PACKAGES="${__PACKAGES} python-yaml python-zmq python-concurrent.futures"

    if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
        # Install python-libcloud if asked to
        __PACKAGES="${__PACKAGES} python-libcloud"
    fi

    __PIP_PACKAGES=''
    if (__check_pip_allowed >/dev/null 2>&1); then
        __PIP_PACKAGES='tornado<5.0'
        # Install development environment for building tornado Python module
        __PACKAGES="${__PACKAGES} build-essential python-dev"

        if ! __check_command_exists pip; then
            __PACKAGES="${__PACKAGES} python-pip"
        fi
    # Attempt to configure backports repo on non-x86_64 system
    elif [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DPKG_ARCHITECTURE" != "amd64" ]; then
        # Check if Debian Backports repo already configured
        if ! apt-cache policy | grep -q 'Debian Backports'; then
            echo 'deb http://httpredir.debian.org/debian jessie-backports main' > \
                /etc/apt/sources.list.d/backports.list
        fi

        __wait_for_apt apt-get update || return 1

        # python-tornado package should be installed from backports repo
        __PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado/jessie-backports"
    else
        __PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado"
    fi

    # shellcheck disable=SC2086
    __apt_get_install_noinput ${__PACKAGES} || return 1

    if [ "${__PIP_PACKAGES}" != "" ]; then
        # shellcheck disable=SC2086,SC2090
        pip install -U ${__PIP_PACKAGES} || return 1
    fi

    # Let's trigger config_salt()
    if [ "$_TEMP_CONFIG_DIR" = "null" ]; then
        _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/"
        CONFIG_SALT_FUNC="config_salt"
    fi

    return 0
}

# Debian 9 (Stretch) git-install dependencies; extra Python-2-only packages
# are appended when no Python 3 interpreter was requested.
install_debian_9_git_deps() {

    if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
        install_debian_git_deps || return 1
        return 0
    fi

    install_debian_deps || return 1
    install_debian_git_pre || return 1

    __PACKAGES="libzmq5 lsb-release"

    if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
        PY_PKG_VER=3
    else
        PY_PKG_VER=""

        # These packages are PY2-ONLY
        __PACKAGES="${__PACKAGES} python-backports-abc python-m2crypto python-concurrent.futures"
    fi

    __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apt python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2"
    __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-systemd"
    __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml python${PY_PKG_VER}-zmq"

    if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then
        # Install python-libcloud if asked to
        __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud"
    fi

    # shellcheck disable=SC2086
    __apt_get_install_noinput ${__PACKAGES} || return 1

    return 0
}

# Debian 10 (Buster) git-install dependencies; tornado is provided via pip
# for pre-Neon checkouts.
install_debian_10_git_deps() {

    if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
        install_debian_git_deps || return 1
        return 0
    fi

    install_debian_deps || return 1
    install_debian_git_pre || return 1

    if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
        _py=${_PY_EXE}
        PY_PKG_VER=3
        __PACKAGES="python${PY_PKG_VER}-distutils"
    else
        _py="python"
        PY_PKG_VER=""
        __PACKAGES=""
    fi

    __install_tornado_pip ${_py} || return 1
    __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-jinja2"
    __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml python${PY_PKG_VER}-zmq"

    # shellcheck disable=SC2086
    __apt_get_install_noinput ${__PACKAGES} || return 1

    return 0
}

# Install the requested Salt packages (cloud/master/minion/syndic) from the
# configured stable repository.
install_debian_stable() {
    __PACKAGES=""

    if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then
        __PACKAGES="${__PACKAGES} salt-cloud"
    fi
    if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then
        __PACKAGES="${__PACKAGES} salt-master"
    fi
    if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then
        __PACKAGES="${__PACKAGES} salt-minion"
    fi
    if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then
        __PACKAGES="${__PACKAGES} salt-syndic"
    fi

    # shellcheck disable=SC2086
    __apt_get_install_noinput ${__PACKAGES} || return 1

    return 0
}

install_debian_7_stable() {
    install_debian_stable || return 1
    return 0
}

install_debian_8_stable() {
    install_debian_stable || return 1
    return 0
}

install_debian_9_stable() {
    install_debian_stable || return 1
    return 0
}

# Install Salt from a git checkout: post-Neon via pip from the repo, pre-Neon
# via setup.py with the Debian install layout.
install_debian_git() {
    if [ -n "$_PY_EXE" ]; then
        _PYEXE=${_PY_EXE}
    else
        _PYEXE=python
    fi

    if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then
        # We can use --prefix on debian based distributions
        if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then
            _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python3/dist-packages --install-option=--install-scripts=/usr/bin"
        else
            _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python2.7/dist-packages --install-option=--install-scripts=/usr/bin"
        fi
        # NOTE(review): the args computed above are immediately cleared here,
        # matching the Ubuntu code path — confirm whether this is intentional.
        _POST_NEON_PIP_INSTALL_ARGS=""
        __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1
        cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1
        sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service
        return 0
    fi

    if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then
        # shellcheck disable=SC2086
        "${_PYEXE}" setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1
    else
        # shellcheck disable=SC2086
        "${_PYEXE}" setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1
    fi

    return 0
}

install_debian_7_git() {
    install_debian_git || return 1
    return 0
}

install_debian_8_git() {
    install_debian_git || return 1
    return 0
}

install_debian_9_git() {
    install_debian_git || return 1
    return 0
}

# Post-install for git installs: set up systemd units (Debian 8+) or SysV
# initscripts (Debian 7) for each selected service. salt-api is configured
# but never enabled/started automatically.
install_debian_git_post() {
    for fname in api master minion syndic; do
        # Skip if not meant to be installed
        [ "$fname" = "api" ] && \
            ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue
        [ "$fname" = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ "$fname" = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ "$fname" = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue

        # Configure SystemD for Debian 8 "Jessie" and later
        if [ -f /bin/systemctl ]; then
            if [ ! -f /lib/systemd/system/salt-${fname}.service ] || \
                { [ -f /lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]; }; then
                if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" ]; then
                    __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system
                    __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}"
                else
                    # workaround before adding Debian-specific unit files to the Salt main repo
                    __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system
                    sed -i -e '/^Type/ s/notify/simple/' /lib/systemd/system/salt-${fname}.service
                fi
            fi

            # Skip salt-api since the service should be opt-in and not necessarily started on boot
            [ "$fname" = "api" ] && continue

            /bin/systemctl enable "salt-${fname}.service"
            SYSTEMD_RELOAD=$BS_TRUE

        # Install initscripts for Debian 7 "Wheezy"
        elif [ ! -f "/etc/init.d/salt-$fname" ] || \
            { [ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then
            __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.init" "/etc/init.d/salt-${fname}"
            __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.environment" "/etc/default/salt-${fname}"

            if [ ! -f "/etc/init.d/salt-${fname}" ]; then
                echowarn "The init script for salt-${fname} was not found, skipping it..."
                continue
            fi

            chmod +x "/etc/init.d/salt-${fname}"

            # Skip salt-api since the service should be opt-in and not necessarily started on boot
            [ "$fname" = "api" ] && continue

            update-rc.d "salt-${fname}" defaults
        fi
    done

    return 0
}

# Restart (stop + start) each selected Salt service via systemd when present,
# otherwise via SysV init. Honors _START_DAEMONS.
install_debian_restart_daemons() {
    [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0

    for fname in api master minion syndic; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue

        if [ -f /bin/systemctl ]; then
            # Debian 8 uses systemd
            /bin/systemctl stop salt-$fname > /dev/null 2>&1
            /bin/systemctl start salt-$fname.service && continue
            if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then
                systemctl status salt-$fname.service
                journalctl -xe
            fi
        elif [ -f /etc/init.d/salt-$fname ]; then
            # Still in SysV init
            /etc/init.d/salt-$fname stop > /dev/null 2>&1
            /etc/init.d/salt-$fname start
        fi
    done

    return 0
}

# Verify each selected Salt service is healthy via the systemd or SysV
# service checkers.
install_debian_check_services() {
    for fname in api master minion syndic; do
        # Skip salt-api since the service should be opt-in and not necessarily started on boot
        [ $fname = "api" ] && continue

        # Skip if not meant to be installed
        [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue
        [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue
        [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue

        if [ -f /bin/systemctl ]; then
            __check_services_systemd salt-$fname || return 1
        elif [ -f /etc/init.d/salt-$fname ]; then
            __check_services_debian salt-$fname || return 1
        fi
    done
    return 0
}
#
#   Ended Debian Install Functions
#
+####################################################################################################################### + +####################################################################################################################### +# +# Fedora Install Functions +# + +install_fedora_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + dnf -y update || return 1 + fi + + __PACKAGES="${__PACKAGES:=}" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -lt 3 ]; then + echoerror "There are no Python 2 stable packages for Fedora, only Py3 packages" + return 1 + fi + + # Salt on Fedora is Py3 + PY_PKG_VER=3 + + __PACKAGES="${__PACKAGES} dnf-utils libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip python${PY_PKG_VER}-m2crypto python${PY_PKG_VER}-pyyaml" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd" + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + fi + + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1 + + return 0 +} + +install_fedora_stable() { + if [ "$STABLE_REV" = "latest" ]; then + __SALT_VERSION="" + else + __SALT_VERSION="$(dnf list --showduplicates salt | grep "$STABLE_REV" | head -n 1 | awk '{print $2}')" + if [ "x${__SALT_VERSION}" = "x" ]; then + echoerror "Could not find a stable install for Salt ${STABLE_REV}" + exit 1 + fi + echoinfo "Installing Stable Package Version ${__SALT_VERSION}" + __SALT_VERSION="-${__SALT_VERSION}" + fi + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud${__SALT_VERSION}" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master${__SALT_VERSION}" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + 
__PACKAGES="${__PACKAGES} salt-minion${__SALT_VERSION}" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic${__SALT_VERSION}" + fi + + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + + __python="python3" + if ! __check_command_exists python3; then + echoerror "Could not find a python3 binary?!" + return 1 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt" + __installed_tornado_rpm=$(rpm -qa | grep python${PY_PKG_VER}-tornado) + if [ -n "${__installed_tornado_rpm}" ]; then + echodebug "Removing system package ${__installed_tornado_rpm}" + rpm -e --nodeps "${__installed_tornado_rpm}" || return 1 + fi + __get_site_packages_dir_code=$(cat << EOM +import site +print([d for d in site.getsitepackages() if d.startswith('/usr/lib/python')][0]) +EOM +) + __target_path=$(${__python} -c "${__get_site_packages_dir_code}") + echodebug "Running '${__python}' -m pip install --target ${__target_path} 'tornado<5.0'" + "${__python}" -m pip install --target "${__target_path}" "tornado<5" || return 1 + fi + + return 0 +} + +install_fedora_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_fedora_git_deps() { + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + # Packages are named python3- + PY_PKG_VER=3 + else 
+ PY_PKG_VER=2 + fi + + __PACKAGES="" + if ! __check_command_exists ps; then + __PACKAGES="${__PACKAGES} procps-ng" + fi + if ! __check_command_exists git; then + __PACKAGES="${__PACKAGES} git" + fi + + if [ -n "${__PACKAGES}" ]; then + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + __PACKAGES="" + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __PACKAGES="${__PACKAGES} ca-certificates" + fi + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr" + fi + + install_fedora_deps || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + if __check_command_exists python3; then + __python="python3" + fi + elif [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + if __check_command_exists python2; then + __python="python2" + fi + else + if ! __check_command_exists python; then + echoerror "Unable to find a python binary?!" 
+ return 1 + fi + # Let's hope it's the right one + __python="python" + fi + + grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS=' + ' read -r dep; do + echodebug "Running '${__python}' -m pip install '${dep}'" + "${__python}" -m pip install "${dep}" || return 1 + done + else + __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __dnf_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_fedora_git() { + if [ "${_PY_EXE}" != "" ]; then + _PYEXE=${_PY_EXE} + echoinfo "Using the following python version: ${_PY_EXE} to install salt" + else + _PYEXE='python2' + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + else + ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + fi + return 0 +} + +install_fedora_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! 
__check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_fedora_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + done +} + +install_fedora_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || 
return 1 + done + + return 0 +} +# +# Ended Fedora Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# CentOS Install Functions +# +__install_epel_repository() { + if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_TRUE ]; then + return 0 + fi + + # Check if epel repo is already enabled and flag it accordingly + if yum repolist | grep -q "^[!]\\?${_EPEL_REPO}/"; then + _EPEL_REPOS_INSTALLED=$BS_TRUE + return 0 + fi + + # Download latest 'epel-release' package for the distro version directly + epel_repo_url="${HTTP_VAL}://dl.fedoraproject.org/pub/epel/epel-release-latest-${DISTRO_MAJOR_VERSION}.noarch.rpm" + rpm -Uvh --force "$epel_repo_url" || return 1 + + _EPEL_REPOS_INSTALLED=$BS_TRUE + + return 0 +} + +__install_saltstack_rhel_repository() { + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Avoid using '$releasever' variable for yum. + # Instead, this should work correctly on all RHEL variants. + base_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${repo_rev}/" + gpg_key="SALTSTACK-GPG-KEY.pub" + repo_file="/etc/yum.repos.d/salt.repo" + + if [ ! 
-s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + cat <<_eof > "$repo_file" +[saltstack] +name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever +baseurl=${base_url} +skip_if_unavailable=True +gpgcheck=1 +gpgkey=${base_url}${gpg_key} +enabled=1 +enabled_metadata=1 +_eof + + fetch_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/" + __rpm_import_gpg "${fetch_url}${gpg_key}" || return 1 + yum clean metadata || return 1 + elif [ "$repo_rev" != "latest" ]; then + echowarn "salt.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $repo_rev." + fi + + return 0 +} + +install_centos_stable_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_epel_repository || return 1 + __install_saltstack_rhel_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. This call has + # its own check in case -r was passed without -R. 
+ if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_rhel_repository || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="dnf-utils chkconfig" + else + __PACKAGES="yum-utils chkconfig" + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python3-pyyaml" + else + __PACKAGES="${__PACKAGES} python2-pyyaml" + fi + elif [ "$DISTRO_MAJOR_VERSION" -eq 7 ]; then + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python36-PyYAML" + else + __PACKAGES="${__PACKAGES} PyYAML" + fi + else + # YAML module is used for generating custom master/minion configs + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PACKAGES="${__PACKAGES} python34-PyYAML" + else + __PACKAGES="${__PACKAGES} PyYAML" + fi + fi + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + + return 0 +} + +install_centos_stable() { + __PACKAGES="" + + local cloud='salt-cloud' + local master='salt-master' + local minion='salt-minion' + local syndic='salt-syndic' + + if echo "$STABLE_REV" | grep -q "archive";then # point release being applied + local ver=$(echo "$STABLE_REV"|awk -F/ '{print $2}') # strip archive/ + elif echo "$STABLE_REV" | egrep -vq "archive|latest";then # latest or major version(3003, 3004, etc) being applie + local ver=$STABLE_REV + fi + + if [ ! 
-z $ver ]; then + cloud+="-$ver" + master+="-$ver" + minion+="-$ver" + syndic+="-$ver" + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} $cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} $master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} $minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} $syndic" + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_centos_stable_post() { + SYSTEMD_RELOAD=$BS_FALSE + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + /bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( + /bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && + /bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 + ) + + SYSTEMD_RELOAD=$BS_TRUE + elif [ -f "/etc/init.d/salt-${fname}" ]; then + /sbin/chkconfig salt-${fname} on + fi + done + + if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then + /bin/systemctl daemon-reload + fi + + return 0 +} + +install_centos_git_deps() { + install_centos_stable_deps || return 1 + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __yum_install_noinput ca-certificates || return 1 + fi + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + # Packages are named python3- + PY_PKG_VER=3 + __PACKAGES="${__PACKAGES} python3" + else + # Packages are named python36- + PY_PKG_VER=36 + __PACKAGES="${__PACKAGES} python36" + fi + else + PY_PKG_VER="" + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="${__PACKAGES} python2" + elif [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then + PY_PKG_VER=27 + __PACKAGES="${__PACKAGES} python27" + else + __PACKAGES="${__PACKAGES} python" + fi + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + _install_m2crypto_req=false + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + _py=${_PY_EXE} + if [ "$DISTRO_MAJOR_VERSION" -gt 6 ]; then + _install_m2crypto_req=true + fi + else + if [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then + _install_m2crypto_req=true + fi + _py="python" + + # Only Py2 needs python-futures + __PACKAGES="${__PACKAGES} python-futures" + + # There is no systemd-python3 package as of this writing + if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then + __PACKAGES="${__PACKAGES} systemd-python" + fi + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __install_tornado_pip ${_py} || return 1 + __PACKAGES="${__PACKAGES} python3-m2crypto" + else + __PACKAGES="${__PACKAGES} m2crypto python${PY_PKG_VER}-crypto" + fi + + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-zmq" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" + fi + + if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ]; then + # Install Python if "-y" was passed in. 
+ __install_python || return 1 + fi + + if [ "${_PY_EXE}" != "" ] && [ "$_PIP_ALLOWED" -eq "$BS_TRUE" ]; then + # If "-x" is defined, install dependencies with pip based on the Python version given. + _PIP_PACKAGES="m2crypto!=0.33.0 jinja2 msgpack-python pycrypto PyYAML tornado<5.0 zmq futures>=2.0" + + # install swig and openssl on cent6 + if $_install_m2crypto_req; then + __yum_install_noinput openssl-devel swig || return 1 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # Filter out any commented lines from the requirements file + _REQ_LINES="$(grep '^[^#]' "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + for SINGLE_PACKAGE in ${_PIP_PACKAGES}; do + __REQUIRED_VERSION="$(grep "${SINGLE_PACKAGE}" "${_REQ_LINES}")" + if [ "${__REQUIRED_VERSION}" != "" ]; then + _PIP_PACKAGES=$(echo "$_PIP_PACKAGES" | sed "s/${SINGLE_PACKAGE}/${__REQUIRED_VERSION}/") + fi + done + fi + + if [ "$_INSTALL_CLOUD" -eq "${BS_TRUE}" ]; then + _PIP_PACKAGES="${_PIP_PACKAGES} apache-libcloud" + fi + + __install_pip_pkgs "${_PIP_PACKAGES}" "${_PY_EXE}" || return 1 + else + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + else + if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then + # Install Python if "-y" was passed in. 
+ __install_python || return 1 + fi + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_centos_git() { + if [ "${_PY_EXE}" != "" ]; then + _PYEXE=${_PY_EXE} + echoinfo "Using the following python version: ${_PY_EXE} to install salt" + else + _PYEXE='python2' + fi + + echodebug "_PY_EXE: $_PY_EXE" + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + else + $_PYEXE setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + fi + + return 0 +} + +install_centos_git_post() { + SYSTEMD_RELOAD=$BS_FALSE + + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + if [ ! -f "/usr/lib/systemd/system/salt-${fname}.service" ] || \ + { [ -f "/usr/lib/systemd/system/salt-${fname}.service" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system + fi + + SYSTEMD_RELOAD=$BS_TRUE + elif [ ! 
-f "/etc/init.d/salt-$fname" ] || \ + { [ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}" /etc/init.d + chmod +x /etc/init.d/salt-${fname} + fi + done + + if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then + /bin/systemctl daemon-reload + fi + + install_centos_stable_post || return 1 + + return 0 +} + +install_centos_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + # We have upstart support and upstart knows about our service + if ! /sbin/initctl status salt-$fname > /dev/null 2>&1; then + # Everything is in place and upstart gave us an error code? Fail! + return 1 + fi + + # upstart knows about this service. + # Let's try to stop it, and then start it + /sbin/initctl stop salt-$fname > /dev/null 2>&1 + # Restart service + if ! /sbin/initctl start salt-$fname > /dev/null 2>&1; then + # Failed the restart?! 
+ return 1 + fi + elif [ -f /etc/init.d/salt-$fname ]; then + # Disable stdin to fix shell session hang on killing tee pipe + service salt-$fname stop < /dev/null > /dev/null 2>&1 + service salt-$fname start < /dev/null + elif [ -f /usr/bin/systemctl ]; then + # CentOS 7 uses systemd + /usr/bin/systemctl stop salt-$fname > /dev/null 2>&1 + /usr/bin/systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + done +} + +install_centos_testing_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_centos_testing() { + install_centos_stable || return 1 + return 0 +} + +install_centos_testing_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_centos_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then + __check_services_upstart salt-$fname || return 1 + elif [ -f /etc/init.d/salt-$fname ]; then + __check_services_sysvinit salt-$fname || return 1 + elif [ -f /usr/bin/systemctl ]; then + __check_services_systemd salt-$fname || return 1 + fi + done + + return 0 +} +# +# Ended CentOS Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# RedHat Install Functions +# +install_red_hat_linux_stable_deps() { 
+ install_centos_stable_deps || return 1 + return 0 +} + +install_red_hat_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable_deps() { + install_red_hat_linux_stable_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git_deps() { + install_red_hat_linux_git_deps || return 1 + return 0 +} + +install_red_hat_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_red_hat_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_red_hat_enterprise_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_server_git() { + install_red_hat_linux_git || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable() { + install_red_hat_linux_stable || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git() { + install_red_hat_linux_git || return 1 + 
return 0 +} + +install_red_hat_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_red_hat_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_red_hat_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_linux_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_linux_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_linux_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_server_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_stable_post() { + install_red_hat_linux_stable_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_restart_daemons() { + install_red_hat_linux_restart_daemons || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_git_post() { + install_red_hat_linux_git_post || return 1 + return 0 +} + +install_red_hat_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + 
+install_red_hat_enterprise_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_enterprise_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_enterprise_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_enterprise_server_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing() { + install_centos_testing || return 1 + return 0 +} + +install_red_hat_enterprise_workstation_testing_post() { + install_centos_testing_post || return 1 + return 0 +} +# +# Ended RedHat Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Oracle Linux Install Functions +# +install_oracle_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_oracle_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_oracle_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_oracle_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_oracle_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_oracle_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_oracle_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_oracle_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + 
+install_oracle_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_oracle_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_oracle_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended Oracle Linux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Scientific Linux Install Functions +# +install_scientific_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_scientific_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_scientific_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_scientific_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_scientific_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_scientific_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_scientific_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_scientific_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_scientific_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_scientific_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_scientific_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended Scientific Linux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# 
CloudLinux Install Functions +# +install_cloud_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_cloud_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_cloud_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_cloud_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_cloud_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_cloud_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_cloud_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_cloud_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_cloud_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_cloud_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_cloud_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# End of CloudLinux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Alpine Linux Install Functions +# +install_alpine_linux_stable_deps() { + if ! grep -q '^[^#].\+alpine/.\+/community' /etc/apk/repositories; then + # Add community repository entry based on the "main" repo URL + __REPO=$(grep '^[^#].\+alpine/.\+/main\>' /etc/apk/repositories) + echo "${__REPO}" | sed -e 's/main/community/' >> /etc/apk/repositories + fi + + apk update + + # Get latest root CA certs + apk -U add ca-certificates + + if ! __check_command_exists openssl; then + # Install OpenSSL to be able to pull from https:// URLs + apk -U add openssl + fi +} + +install_alpine_linux_git_deps() { + install_alpine_linux_stable_deps || return 1 + + if ! 
__check_command_exists git; then + apk -U add git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + apk -U add python2 py-virtualenv py2-crypto py2-m2crypto py2-setuptools \ + py2-jinja2 py2-yaml py2-markupsafe py2-msgpack py2-psutil \ + py2-zmq zeromq py2-requests || return 1 + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + apk -U add py2-tornado || return 1 + fi + fi + else + apk -U add python2 py2-pip py2-setuptools || return 1 + _PY_EXE=python2 + return 0 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi +} + +install_alpine_linux_stable() { + __PACKAGES="salt" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + apk -U add ${__PACKAGES} || return 1 + return 0 +} + +install_alpine_linux_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + python2 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + fi +} + +install_alpine_linux_post() { + for fname in api master minion syndic; do + # Skip if not 
meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /sbin/rc-update ]; then + script_url="${_SALTSTACK_REPO_URL%.git}/raw/master/pkg/alpine/salt-$fname" + [ -f "/etc/init.d/salt-$fname" ] || __fetch_url "/etc/init.d/salt-$fname" "$script_url" + + # shellcheck disable=SC2181 + if [ $? -eq 0 ]; then + chmod +x "/etc/init.d/salt-$fname" + else + echoerror "Failed to get OpenRC init script for $OS_NAME from $script_url." + return 1 + fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + /sbin/rc-update add "salt-$fname" > /dev/null 2>&1 || return 1 + fi + done +} + +install_alpine_linux_restart_daemons() { + [ "${_START_DAEMONS}" -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Disable stdin to fix shell session hang on killing tee pipe + /sbin/rc-service salt-$fname stop < /dev/null > /dev/null 2>&1 + /sbin/rc-service salt-$fname start < /dev/null || return 1 + done +} + +install_alpine_linux_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ 
"$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_openrc salt-$fname || return 1 + done + + return 0 +} + +daemons_running_alpine_linux() { + [ "${_START_DAEMONS}" -eq $BS_FALSE ] && return + + FAILED_DAEMONS=0 + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # shellcheck disable=SC2009 + if [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} + +# +# Ended Alpine Linux Install Functions +# +####################################################################################################################### + + +####################################################################################################################### +# +# Amazon Linux AMI Install Functions +# + +install_amazon_linux_ami_deps() { + # Shim to figure out if we're using old (rhel) or new (aws) rpms. 
+ _USEAWS=$BS_FALSE + pkg_append="python" + + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + if echo $repo_rev | grep -E -q '^archive'; then + year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) + else + year=$(echo "$repo_rev" | cut -c1-4) + fi + + if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \ + [ "$year" -gt 2016 ]; then + _USEAWS=$BS_TRUE + pkg_append="python27" + fi + + # We need to install yum-utils before doing anything else when installing on + # Amazon Linux ECS-optimized images. See issue #974. + __yum_install_noinput yum-utils + + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __REPO_FILENAME="salt.repo" + + # Set a few vars to make life easier. + if [ $_USEAWS -eq $BS_TRUE ]; then + base_url="$HTTP_VAL://${_REPO_URL}/yum/amazon/latest/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + repo_name="SaltStack repo for Amazon Linux" + else + base_url="$HTTP_VAL://${_REPO_URL}/yum/redhat/6/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + repo_name="SaltStack repo for RHEL/CentOS 6" + fi + + # This should prob be refactored to use __install_saltstack_rhel_repository() + # With args passed in to do the right thing. Reformatted to be more like the + # amazon linux yum file. + if [ ! 
-s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then + cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" +[saltstack-repo] +name=$repo_name +failovermethod=priority +priority=10 +gpgcheck=1 +gpgkey=$gpg_key +baseurl=$base_url +_eof + fi + + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed + __PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 ${pkg_append}-PyYAML" + __PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq" + __PACKAGES="${__PACKAGES} ${pkg_append}-futures" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_amazon_linux_ami_git_deps() { + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + yum -y install ca-certificates || return 1 + fi + + PIP_EXE='pip' + if __check_command_exists python2.7; then + if ! __check_command_exists pip2.7; then + if ! __check_command_exists easy_install-2.7; then + __yum_install_noinput python27-setuptools + fi + /usr/bin/easy_install-2.7 pip || return 1 + fi + PIP_EXE='/usr/local/bin/pip2.7' + _PY_EXE='python2.7' + fi + + install_amazon_linux_ami_deps || return 1 + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __PACKAGES="" + __PIP_PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud" + __PACKAGES="${__PACKAGES} python27-pip" + __PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION" + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} ${pkg_append}-tornado" + fi + fi + + if [ "${__PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + ${PIP_EXE} install ${__PIP_PACKAGES} || return 1 + fi + else + __PACKAGES="python27-pip python27-setuptools python27-devel gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_amazon_linux_ami_2_git_deps() { + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + yum -y install ca-certificates || return 1 + fi + + install_amazon_linux_ami_2_deps || return 1 + + if [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + PIP_EXE='/bin/pip' + else + PY_PKG_VER=3 + PIP_EXE='/bin/pip3' + fi + __PACKAGES="python${PY_PKG_VER}-pip" + + if ! __check_command_exists "${PIP_EXE}"; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if ! 
__check_command_exists git; then + __yum_install_noinput git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + __PACKAGES="" + __PIP_PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq "$BS_TRUE" ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud" + if [ "$PARSED_VERSION" -eq "2" ]; then + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq "3" ]; then + __PACKAGES="${__PACKAGES} python3-pip" + __PIP_PACKAGES="${__PIP_PACKAGES} tornado<$_TORNADO_MAX_PY3_VERSION" + else + __PACKAGES="${__PACKAGES} python2-pip" + fi + else + __PACKAGES="${__PACKAGES} python27-pip" + fi + __PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION" + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq "3" ]; then + __PIP_PACKAGES="${__PIP_PACKAGES} tornado<$_TORNADO_MAX_PY3_VERSION" + else + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-tornado" + fi + fi + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + __check_pip_allowed "You need to allow pip based installations (-P) in order to install ${__PIP_PACKAGES}" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip" + fi + + if [ "${__PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${__PIP_PACKAGES}" != "" ]; then + # shellcheck disable=SC2086 + ${PIP_EXE} install ${__PIP_PACKAGES} || return 1 + fi + else + __PACKAGES="python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools python${PY_PKG_VER}-devel gcc" + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + # Let's trigger config_salt() + if [ 
"$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_amazon_linux_ami_2_deps() { + # Shim to figure out if we're using old (rhel) or new (aws) rpms. + _USEAWS=$BS_FALSE + pkg_append="python" + + if [ "$ITYPE" = "stable" ]; then + repo_rev="$STABLE_REV" + else + repo_rev="latest" + fi + + if echo $repo_rev | grep -E -q '^archive'; then + year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) + else + year=$(echo "$repo_rev" | cut -c1-4) + fi + + if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \ + [ "$year" -gt 2016 ]; then + _USEAWS=$BS_TRUE + pkg_append="python" + fi + + # We need to install yum-utils before doing anything else when installing on + # Amazon Linux ECS-optimized images. See issue #974. + __yum_install_noinput yum-utils + + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __REPO_FILENAME="salt.repo" + __PY_VERSION_REPO="yum" + PY_PKG_VER="" + repo_label="saltstack-repo" + repo_name="SaltStack repo for Amazon Linux 2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __REPO_FILENAME="salt.repo" + __PY_VERSION_REPO="py3" + PY_PKG_VER=3 + repo_label="saltstack-py3-repo" + repo_name="SaltStack Python 3 repo for Amazon Linux 2" + fi + + base_url="$HTTP_VAL://${_REPO_URL}/${__PY_VERSION_REPO}/amazon/2/\$basearch/$repo_rev/" + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub,${base_url}base/RPM-GPG-KEY-CentOS-7" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + fi + + # This should prob be refactored to use __install_saltstack_rhel_repository() + # With args passed in to do the right thing. Reformatted to be more like the + # amazon linux yum file. + if [ ! 
-s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then + cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" +[$repo_label] +name=$repo_name +failovermethod=priority +priority=10 +gpgcheck=1 +gpgkey=$gpg_key +baseurl=$base_url +_eof + fi + + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed + if [ -n "${PY_PKG_VER}" ] && [ "${PY_PKG_VER}" -eq 3 ]; then + __PACKAGES="${pkg_append}${PY_PKG_VER}-m2crypto ${pkg_append}${PY_PKG_VER}-pyyaml" + else + __PACKAGES="m2crypto PyYAML ${pkg_append}-futures" + fi + + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-crypto ${pkg_append}${PY_PKG_VER}-jinja2 procps-ng" + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-msgpack ${pkg_append}${PY_PKG_VER}-requests ${pkg_append}${PY_PKG_VER}-zmq" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_amazon_linux_ami_stable() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_amazon_linux_ami_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_amazon_linux_ami_git() { + install_centos_git || return 1 + return 0 +} + +install_amazon_linux_ami_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_amazon_linux_ami_testing() { + install_centos_testing || return 1 + return 0 +} + +install_amazon_linux_ami_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_stable() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_2_stable_post() { + 
install_centos_stable_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_amazon_linux_ami_2_git() { + install_centos_git || return 1 + return 0 +} + +install_amazon_linux_ami_2_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_testing() { + install_centos_testing || return 1 + return 0 +} + +install_amazon_linux_ami_2_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_amazon_linux_ami_2_check_services() { + install_centos_check_services || return 1 + return 0 +} + +# +# Ended Amazon Linux AMI Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Arch Install Functions +# +install_arch_linux_stable_deps() { + if [ ! 
-f /etc/pacman.d/gnupg ]; then + pacman-key --init && pacman-key --populate archlinux || return 1 + fi + + # Pacman does not resolve dependencies on outdated versions + # They always need to be updated + pacman -Syy --noconfirm + + pacman -S --noconfirm --needed archlinux-keyring || return 1 + + pacman -Su --noconfirm --needed pacman || return 1 + + if __check_command_exists pacman-db-upgrade; then + pacman-db-upgrade || return 1 + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER="" + fi + + # YAML module is used for generating custom master/minion configs + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed python${PY_PKG_VER}-yaml + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed python${PY_PKG_VER}-apache-libcloud || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed ${_EXTRA_PACKAGES} || return 1 + fi +} + +install_arch_linux_git_deps() { + install_arch_linux_stable_deps + + # Don't fail if un-installing python2-distribute threw an error + if ! 
__check_command_exists git; then + pacman -Sy --noconfirm --needed git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + pacman -R --noconfirm python2-distribute + pacman -Su --noconfirm --needed python2-crypto python2-setuptools python2-jinja \ + python2-m2crypto python2-markupsafe python2-msgpack python2-psutil \ + python2-pyzmq zeromq python2-requests python2-systemd || return 1 + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + pacman -Su --noconfirm --needed python2-tornado + fi + fi + else + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER="" + fi + __PACKAGES="python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + # shellcheck disable=SC2086 + pacman -Su --noconfirm --needed ${__PACKAGES} + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_arch_linux_stable() { + # Pacman does not resolve dependencies on outdated versions + # They always need to be updated + pacman -Syy --noconfirm + + pacman -Su --noconfirm --needed pacman || return 1 + # See https://mailman.archlinux.org/pipermail/arch-dev-public/2013-June/025043.html + # to know why we're ignoring below. 
+ pacman -Syu --noconfirm --ignore filesystem,bash || return 1 + pacman -S --noconfirm --needed bash || return 1 + pacman -Su --noconfirm || return 1 + # We can now resume regular salt update + pacman -Syu --noconfirm salt || return 1 + return 0 +} + +install_arch_linux_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + python2 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + fi + return 0 +} + +install_arch_linux_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Since Arch's pacman renames configuration files + if [ "$_TEMP_CONFIG_DIR" != "null" ] && [ -f "$_SALT_ETC_DIR/$fname.pacorig" ]; then + # Since a configuration directory was provided, it also means that any + # configuration file copied was renamed by Arch, see: + # https://wiki.archlinux.org/index.php/Pacnew_and_Pacsave_Files#.pacorig + __copyfile "$_SALT_ETC_DIR/$fname.pacorig" "$_SALT_ETC_DIR/$fname" $BS_TRUE + fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + if [ -f /usr/bin/systemctl ]; then + # Using systemd + /usr/bin/systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( + /usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && + /usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + sleep 1 + 
/usr/bin/systemctl daemon-reload + continue + fi + + # XXX: How do we enable old Arch init.d scripts? + done +} + +install_arch_linux_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + /usr/bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( + /usr/bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && + /usr/bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 + ) + sleep 1 + /usr/bin/systemctl daemon-reload + continue + fi + + # SysV init!? 
+ __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname" "/etc/rc.d/init.d/salt-$fname" + chmod +x /etc/rc.d/init.d/salt-$fname + done +} + +install_arch_linux_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /usr/bin/systemctl ]; then + /usr/bin/systemctl stop salt-$fname.service > /dev/null 2>&1 + /usr/bin/systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + /etc/rc.d/salt-$fname stop > /dev/null 2>&1 + /etc/rc.d/salt-$fname start + done +} + +install_arch_check_services() { + if [ ! -f /usr/bin/systemctl ]; then + # Not running systemd!? Don't check! 
+ return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} +# +# Ended Arch Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# FreeBSD Install Functions +# + +# Using a separate conf step to head for idempotent install... +__configure_freebsd_pkg_details() { + _SALT_ETC_DIR="/usr/local/etc/salt" + _PKI_DIR=${_SALT_ETC_DIR}/pki + _POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr/local" +} + +install_freebsd_deps() { + __configure_freebsd_pkg_details + pkg install -y pkg +} + +install_freebsd_git_deps() { + install_freebsd_deps || return 1 + + if ! 
__check_command_exists git; then + /usr/local/sbin/pkg install -y git || return 1 + fi + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py38-salt) + # shellcheck disable=SC2086 + /usr/local/sbin/pkg install -y ${SALT_DEPENDENCIES} python || return 1 + + /usr/local/sbin/pkg install -y py38-requests || return 1 + /usr/local/sbin/pkg install -y py38-tornado4 || return 1 + + else + /usr/local/sbin/pkg install -y python py38-pip py38-setuptools libzmq4 libunwind || return 1 + fi + + echodebug "Adapting paths to FreeBSD" + # The list of files was taken from Salt's BSD port Makefile + for file in doc/man/salt-key.1 doc/man/salt-cp.1 doc/man/salt-minion.1 \ + doc/man/salt-syndic.1 doc/man/salt-master.1 doc/man/salt-run.1 \ + doc/man/salt.7 doc/man/salt.1 doc/man/salt-call.1; do + [ ! -f $file ] && continue + echodebug "Patching ${file}" + sed -in -e "s|/etc/salt|${_SALT_ETC_DIR}|" \ + -e "s|/srv/salt|${_SALT_ETC_DIR}/states|" \ + -e "s|/srv/pillar|${_SALT_ETC_DIR}/pillar|" ${file} + done + if [ ! -f salt/syspaths.py ]; then + # We still can't provide the system paths, salt 0.16.x + # Let's patch salt's source and adapt paths to what's expected on FreeBSD + echodebug "Replacing occurrences of '/etc/salt' with ${_SALT_ETC_DIR}" + # The list of files was taken from Salt's BSD port Makefile + for file in conf/minion conf/master salt/config.py salt/client.py \ + salt/modules/mysql.py salt/utils/parsers.py salt/modules/tls.py \ + salt/modules/postgres.py salt/utils/migrations.py; do + [ ! 
-f $file ] && continue + echodebug "Patching ${file}" + sed -in -e "s|/etc/salt|${_SALT_ETC_DIR}|" \ + -e "s|/srv/salt|${_SALT_ETC_DIR}/states|" \ + -e "s|/srv/pillar|${_SALT_ETC_DIR}/pillar|" ${file} + done + fi + echodebug "Finished patching" + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + + fi + + return 0 +} + +install_freebsd_stable() { +# +# installing latest version of salt from FreeBSD CURRENT ports repo +# + # shellcheck disable=SC2086 + /usr/local/sbin/pkg install -y py38-salt || return 1 + + return 0 +} + +install_freebsd_git() { + + # /usr/local/bin/python3 in FreeBSD is a symlink to /usr/local/bin/python3.7 + __PYTHON_PATH=$(readlink -f "$(command -v python3)") + __ESCAPED_PYTHON_PATH=$(echo "${__PYTHON_PATH}" | sed 's/\//\\\//g') + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${__PYTHON_PATH}" || return 1 + for script in salt_api salt_master salt_minion salt_proxy salt_syndic; do + __fetch_url "/usr/local/etc/rc.d/${script}" "https://raw.githubusercontent.com/freebsd/freebsd-ports/master/sysutils/py-salt/files/${script}.in" || return 1 + sed -i '' 's/%%PREFIX%%/\/usr\/local/g' /usr/local/etc/rc.d/${script} + sed -i '' "s/%%PYTHON_CMD%%/${__ESCAPED_PYTHON_PATH}/g" /usr/local/etc/rc.d/${script} + chmod +x /usr/local/etc/rc.d/${script} || return 1 + done + + return 0 + fi + + # Install from git + if [ ! 
-f salt/syspaths.py ]; then + # We still can't provide the system paths, salt 0.16.x + ${__PYTHON_PATH} setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + else + ${__PYTHON_PATH} setup.py \ + --salt-root-dir=/ \ + --salt-config-dir="${_SALT_ETC_DIR}" \ + --salt-cache-dir="${_SALT_CACHE_DIR}" \ + --salt-sock-dir=/var/run/salt \ + --salt-srv-root-dir="${_SALT_ETC_DIR}" \ + --salt-base-file-roots-dir="${_SALT_ETC_DIR}/states" \ + --salt-base-pillar-roots-dir="${_SALT_ETC_DIR}/pillar" \ + --salt-base-master-roots-dir="${_SALT_ETC_DIR}/salt-master" \ + --salt-logs-dir=/var/log/salt \ + --salt-pidfile-dir=/var/run \ + ${SETUP_PY_INSTALL_ARGS} install \ + || return 1 + fi + + for script in salt_api salt_master salt_minion salt_proxy salt_syndic; do + __fetch_url "/usr/local/etc/rc.d/${script}" "https://raw.githubusercontent.com/freebsd/freebsd-ports/master/sysutils/py-salt/files/${script}.in" || return 1 + sed -i '' 's/%%PREFIX%%/\/usr\/local/g' /usr/local/etc/rc.d/${script} + sed -i '' "s/%%PYTHON_CMD%%/${__ESCAPED_PYTHON_PATH}/g" /usr/local/etc/rc.d/${script} + chmod +x /usr/local/etc/rc.d/${script} || return 1 + done + + # And we're good to go + return 0 +} + +install_freebsd_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + enable_string="salt_${fname}_enable=YES" + grep "$enable_string" /etc/rc.conf >/dev/null 2>&1 + [ $? 
-eq 1 ] && sysrc $enable_string + + done +} + +install_freebsd_git_post() { + install_freebsd_stable_post || return 1 + return 0 +} + +install_freebsd_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + service salt_$fname stop > /dev/null 2>&1 + service salt_$fname start + done +} +# +# Ended FreeBSD Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# OpenBSD Install Functions +# + +install_openbsd_deps() { + if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + OPENBSD_REPO='https://cdn.openbsd.org/pub/OpenBSD' + echoinfo "setting package repository to $OPENBSD_REPO" + echo "${OPENBSD_REPO}" >/etc/installurl || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pkg_add -I -v ${_EXTRA_PACKAGES} || return 1 + fi + return 0 +} + +install_openbsd_git_deps() { + install_openbsd_deps || return 1 + + if ! 
__check_command_exists git; then + pkg_add -I -v git || return 1 + fi + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + pkg_add -I -v py-pip py-setuptools + fi + + # + # Let's trigger config_salt() + # + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_openbsd_git() { + # + # Install from git + # + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ ! -f salt/syspaths.py ]; then + # We still can't provide the system paths, salt 0.16.x + /usr/local/bin/python2.7 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 + fi + return 0 +} + +install_openbsd_stable() { + pkg_add -r -I -v salt || return 1 + return 0 +} + +install_openbsd_post() { + for fname in api master minion syndic; do + [ $fname = "api" ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + rcctl enable salt_$fname + done + + return 0 +} + +install_openbsd_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && continue + + if [ -f /etc/rc.d/salt_${fname} ]; then + __check_services_openbsd salt_${fname} || return 1 + fi + done + + return 0 +} + +install_openbsd_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not 
necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + rcctl restart salt_${fname} + done + + return 0 +} + +# +# Ended OpenBSD Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# SmartOS Install Functions +# +install_smartos_deps() { + smartos_deps="$(pkgin show-deps salt | grep '^\s' | grep -v '\snot' | xargs) py27-m2crypto" + pkgin -y install "${smartos_deps}" || return 1 + + # Set _SALT_ETC_DIR to SmartOS default if they didn't specify + _SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt} + # We also need to redefine the PKI directory + _PKI_DIR=${_SALT_ETC_DIR}/pki + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + # Let's set the configuration directory to /tmp + _TEMP_CONFIG_DIR="/tmp" + CONFIG_SALT_FUNC="config_salt" + + # Let's download, since they were not provided, the default configuration files + if [ ! -f "$_SALT_ETC_DIR/minion" ] && [ ! -f "$_TEMP_CONFIG_DIR/minion" ]; then + # shellcheck disable=SC2086 + curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/minion" -L \ + https://raw.githubusercontent.com/saltstack/salt/master/conf/minion || return 1 + fi + if [ ! -f "$_SALT_ETC_DIR/master" ] && [ ! 
-f $_TEMP_CONFIG_DIR/master ]; then + # shellcheck disable=SC2086 + curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/master" -L \ + https://raw.githubusercontent.com/saltstack/salt/master/conf/master || return 1 + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + pkgin -y install py27-apache-libcloud || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + pkgin -y install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_smartos_git_deps() { + install_smartos_deps || return 1 + + if ! __check_command_exists git; then + pkgin -y install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # Install whichever tornado is in the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + __check_pip_allowed "You need to allow pip based installations (-P) in order to install the python package '${__REQUIRED_TORNADO}'" + + # Install whichever futures is in the requirements file + __REQUIRED_FUTURES="$(grep futures "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + __check_pip_allowed "You need to allow pip based installations (-P) in order to install the python package '${__REQUIRED_FUTURES}'" + + if [ "${__REQUIRED_TORNADO}" != "" ]; then + if ! __check_command_exists pip; then + pkgin -y install py27-pip + fi + pip install -U "${__REQUIRED_TORNADO}" + fi + + if [ "${__REQUIRED_FUTURES}" != "" ]; then + if ! __check_command_exists pip; then + pkgin -y install py27-pip + fi + pip install -U "${__REQUIRED_FUTURES}" + fi + fi + else + if ! 
__check_command_exists pip; then + pkgin -y install py27-pip + fi + pkgin -y install py27-setuptools + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_smartos_stable() { + pkgin -y install salt || return 1 + return 0 +} + +install_smartos_git() { + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + # Use setuptools in order to also install dependencies + # lets force our config path on the setup for now, since salt/syspaths.py only got fixed in 2015.5.0 + USE_SETUPTOOLS=1 /opt/local/bin/python setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 + return 0 +} + +install_smartos_post() { + smf_dir="/opt/custom/smf" + + # Install manifest files if needed. + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + svcs network/salt-$fname > /dev/null 2>&1 + if [ $? -eq 1 ]; then + if [ ! -f "$_TEMP_CONFIG_DIR/salt-$fname.xml" ]; then + # shellcheck disable=SC2086 + curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/salt-$fname.xml" -L \ + "https://raw.githubusercontent.com/saltstack/salt/master/pkg/smartos/salt-$fname.xml" + fi + svccfg import "$_TEMP_CONFIG_DIR/salt-$fname.xml" + if [ "${VIRTUAL_TYPE}" = "global" ]; then + if [ ! -d "$smf_dir" ]; then + mkdir -p "$smf_dir" || return 1 + fi + if [ ! 
-f "$smf_dir/salt-$fname.xml" ]; then + __copyfile "$_TEMP_CONFIG_DIR/salt-$fname.xml" "$smf_dir/" || return 1 + fi + fi + fi + done + + return 0 +} + +install_smartos_git_post() { + smf_dir="/opt/custom/smf" + + # Install manifest files if needed. + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + svcs "network/salt-$fname" > /dev/null 2>&1 + if [ $? -eq 1 ]; then + svccfg import "${_SALT_GIT_CHECKOUT_DIR}/pkg/smartos/salt-$fname.xml" + if [ "${VIRTUAL_TYPE}" = "global" ]; then + if [ ! -d $smf_dir ]; then + mkdir -p "$smf_dir" + fi + if [ ! -f "$smf_dir/salt-$fname.xml" ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/smartos/salt-$fname.xml" "$smf_dir/" + fi + fi + fi + done + + return 0 +} + +install_smartos_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Stop if running && Start service + svcadm disable salt-$fname > /dev/null 2>&1 + svcadm enable salt-$fname + done + + return 0 +} +# +# Ended SmartOS Install Functions +# +####################################################################################################################### + 
+####################################################################################################################### +# +# openSUSE Install Functions. +# +__ZYPPER_REQUIRES_REPLACE_FILES=-1 + +__set_suse_pkg_repo() { + + # Set distro repo variable + if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then + DISTRO_REPO="openSUSE_Tumbleweed" + elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ] || [ "${DISTRO_MAJOR_VERSION}" -eq 15 ]; then + DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" + else + DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}_SP${SUSE_PATCHLEVEL}" + fi + + if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then + suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" + else + suse_pkg_url_base="${HTTP_VAL}://repo.saltproject.io/opensuse" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack:products.repo" + fi + SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path" +} + +__check_and_refresh_suse_pkg_repo() { + # Check to see if systemsmanagement_saltstack exists + __zypper repos | grep -q systemsmanagement_saltstack + + if [ $? -eq 1 ]; then + # zypper does not yet know anything about systemsmanagement_saltstack + __zypper addrepo --refresh "${SUSE_PKG_URL}" || return 1 + fi +} + +__version_lte() { + if ! __check_command_exists python; then + zypper --non-interactive install --replacefiles --auto-agree-with-licenses python || \ + zypper --non-interactive install --auto-agree-with-licenses python || return 1 + fi + + if [ "$(python -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print V1<=V2' "$1" "$2")" = "True" ]; then + __ZYPPER_REQUIRES_REPLACE_FILES=${BS_TRUE} + else + __ZYPPER_REQUIRES_REPLACE_FILES=${BS_FALSE} + fi +} + +__zypper() { + # Check if any zypper process is running before calling zypper again. 
+ # This is useful when a zypper call is part of a boot process and will + # wait until the zypper process is finished, such as on AWS AMIs. + while pgrep -l zypper; do + sleep 1 + done + + zypper --non-interactive "${@}" + # Return codes between 100 and 104 are only informations, not errors + # https://en.opensuse.org/SDB:Zypper_manual#EXIT_CODES + if [ "$?" -gt "99" ] && [ "$?" -le "104" ]; then + return 0 + fi + return $? +} + +__zypper_install() { + if [ "${__ZYPPER_REQUIRES_REPLACE_FILES}" = "-1" ]; then + __version_lte "1.10.4" "$(zypper --version | awk '{ print $2 }')" + fi + if [ "${__ZYPPER_REQUIRES_REPLACE_FILES}" = "${BS_TRUE}" ]; then + # In case of file conflicts replace old files. + # Option present in zypper 1.10.4 and newer: + # https://github.com/openSUSE/zypper/blob/95655728d26d6d5aef7796b675f4cc69bc0c05c0/package/zypper.changes#L253 + __zypper install --auto-agree-with-licenses --replacefiles "${@}"; return $? + else + __zypper install --auto-agree-with-licenses "${@}"; return $? + fi +} + +__opensuse_prep_install() { + # DRY function for common installation preparatory steps for SUSE + if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then + # Is the repository already known + __set_suse_pkg_repo + # Check zypper repos and refresh if necessary + __check_and_refresh_suse_pkg_repo + fi + + __zypper --gpg-auto-import-keys refresh + + # shellcheck disable=SC2181 + if [ $? -ne 0 ] && [ $? -ne 4 ]; then + # If the exit code is not 0, and it's not 4 (failed to update a + # repository) return a failure. Otherwise continue. 
+ return 1 + fi + + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + __zypper --gpg-auto-import-keys update || return 1 + fi +} + +install_opensuse_stable_deps() { + __opensuse_prep_install || return 1 + + if [ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$DISTRO_MINOR_VERSION" -eq 3 ]; then + # Because patterns-openSUSE-minimal_base-conflicts conflicts with python, lets remove the first one + __zypper remove patterns-openSUSE-minimal_base-conflicts + fi + + # YAML module is used for generating custom master/minion configs + # requests is still used by many salt modules + # Salt needs python-zypp installed in order to use the zypper module + __PACKAGES="python-PyYAML python-requests python-zypp" + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_opensuse_git_deps() { + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ] && ! __check_command_exists update-ca-certificates; then + __zypper_install ca-certificates || return 1 + fi + + install_opensuse_stable_deps || return 1 + + if ! 
__check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + __zypper_install patch || return 1 + + __PACKAGES="libzmq5 python-Jinja2 python-m2crypto python-msgpack-python python-pycrypto python-pyzmq python-xml python-futures" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + # Check for Tumbleweed + elif [ "${DISTRO_MAJOR_VERSION}" -ge 20210101 ]; then + __PACKAGES="python3-pip" + else + __PACKAGES="python-pip python-setuptools gcc" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_opensuse_stable() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __zypper_install $__PACKAGES || return 1 + + return 0 +} + +install_opensuse_git() { + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + python setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + return 0 +} + 
+install_opensuse_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ] || [ -f /usr/bin/systemctl ]; then + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + continue + fi + + /sbin/chkconfig --add salt-$fname + /sbin/chkconfig salt-$fname on + done + + return 0 +} + +install_opensuse_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if command -v systemctl; then + use_usr_lib=$BS_FALSE + + if [ "${DISTRO_MAJOR_VERSION}" -ge 15 ]; then + use_usr_lib=$BS_TRUE + fi + + if [ "${DISTRO_MAJOR_VERSION}" -eq 12 ] && [ -d "/usr/lib/systemd/" ]; then + use_usr_lib=$BS_TRUE + fi + + if [ "${use_usr_lib}" -eq $BS_TRUE ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" + else + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + fi + + continue + fi + + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname" "/etc/init.d/salt-$fname" + chmod +x /etc/init.d/salt-$fname + done + + install_opensuse_stable_post || return 1 + + 
return 0 +} + +install_opensuse_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + fi + + service salt-$fname stop > /dev/null 2>&1 + service salt-$fname start + done +} + +install_opensuse_check_services() { + if [ ! -f /bin/systemctl ]; then + # Not running systemd!? Don't check! + return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname > /dev/null 2>&1 || __check_services_systemd salt-$fname.service > /dev/null 2>&1 || return 1 + done + + return 0 +} +# +# End of openSUSE Install Functions. 
+# +####################################################################################################################### + +####################################################################################################################### +# +# openSUSE Leap 15 +# + +install_opensuse_15_stable_deps() { + __opensuse_prep_install || return 1 + + # SUSE only packages Salt for Python 3 on Leap 15 + # Py3 is the default bootstrap install for Leap 15 + # However, git installs might specify "-x python2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER=3 + fi + + # YAML module is used for generating custom master/minion configs + # requests is still used by many salt modules + __PACKAGES="python${PY_PKG_VER}-PyYAML python${PY_PKG_VER}-requests" + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_opensuse_15_git_deps() { + install_opensuse_15_stable_deps || return 1 + + if ! 
__check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + PY_PKG_VER=2 + else + PY_PKG_VER=3 + fi + + __PACKAGES="python${PY_PKG_VER}-xml" + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + # Py3 is the default bootstrap install for Leap 15 + # However, git installs might specify "-x python2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + # This is required by some of the python2 packages below + __PACKAGES="${__PACKAGES} libpython2_7-1_0 python2-futures python-ipaddress" + fi + + __PACKAGES="${__PACKAGES} libzmq5 python${PY_PKG_VER}-Jinja2 python${PY_PKG_VER}-msgpack" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pycrypto python${PY_PKG_VER}-pyzmq" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apache-libcloud" + fi + else + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_opensuse_15_git() { + + # Py3 is the default bootstrap install for Leap 15 + if [ -n "$_PY_EXE" ]; then + _PYEXE=${_PY_EXE} + else + _PYEXE=python3 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + ${_PYEXE} setup.py 
${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + return 0 +} + +# +# End of openSUSE Leap 15 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise 15 +# + +install_suse_15_stable_deps() { + __opensuse_prep_install || return 1 + install_opensuse_15_stable_deps || return 1 + + return 0 +} + +install_suse_15_git_deps() { + install_suse_15_stable_deps || return 1 + + if ! __check_command_exists git; then + __zypper_install git-core || return 1 + fi + + install_opensuse_15_git_deps || return 1 + + return 0 +} + +install_suse_15_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_15_git() { + install_opensuse_15_git || return 1 + return 0 +} + +install_suse_15_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_15_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_15_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + +# +# End of SUSE Enterprise 15 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise 12 +# + +install_suse_12_stable_deps() { + __opensuse_prep_install || return 1 + + # YAML module is used for generating custom master/minion configs + # requests is still used by many salt modules + # Salt needs python-zypp installed in order to use the zypper module + __PACKAGES="python-PyYAML python-requests python-zypp" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086,SC2090 + __zypper_install ${__PACKAGES} || return 1 + + # 
SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which + # we want to install, even with --non-interactive. + # Let's try to install the higher version first and then the lower one in case of failure + __zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_suse_12_git_deps() { + install_suse_12_stable_deps || return 1 + + if ! __check_command_exists git; then + __zypper_install git-core || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + # shellcheck disable=SC2089 + __PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto" + __PACKAGES="${__PACKAGES} python-pyzmq python-xml" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_suse_12_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_12_git() { + install_opensuse_git || return 1 + return 0 +} + +install_suse_12_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_12_git_post() { + install_opensuse_git_post || 
return 1 + return 0 +} + +install_suse_12_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + +# +# End of SUSE Enterprise 12 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise 11 +# + +install_suse_11_stable_deps() { + __opensuse_prep_install || return 1 + + # YAML module is used for generating custom master/minion configs + __PACKAGES="python-PyYAML" + + # shellcheck disable=SC2086,SC2090 + __zypper_install ${__PACKAGES} || return 1 + + # SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which + # we want to install, even with --non-interactive. + # Let's try to install the higher version first and then the lower one in case of failure + __zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __zypper_install ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_suse_11_git_deps() { + install_suse_11_stable_deps || return 1 + + if ! 
__check_command_exists git; then + __zypper_install git || return 1 + fi + + __git_clone_and_checkout || return 1 + + __PACKAGES="" + # shellcheck disable=SC2089 + __PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto" + __PACKAGES="${__PACKAGES} python-pyzmq python-xml python-zypp" + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then + # We're on the master branch, install whichever tornado is on the requirements file + __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" + if [ "${__REQUIRED_TORNADO}" != "" ]; then + __PACKAGES="${__PACKAGES} python-tornado" + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python-apache-libcloud" + fi + + # shellcheck disable=SC2086 + __zypper_install ${__PACKAGES} || return 1 + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + +install_suse_11_stable() { + install_opensuse_stable || return 1 + return 0 +} + +install_suse_11_git() { + install_opensuse_git || return 1 + return 0 +} + +install_suse_11_stable_post() { + install_opensuse_stable_post || return 1 + return 0 +} + +install_suse_11_git_post() { + install_opensuse_git_post || return 1 + return 0 +} + +install_suse_11_restart_daemons() { + install_opensuse_restart_daemons || return 1 + return 0 +} + + +# +# End of SUSE Enterprise 11 +# +####################################################################################################################### + +####################################################################################################################### +# +# SUSE Enterprise General Functions +# + +# Used for both SLE 11 and 12 +install_suse_check_services() { + if [ ! -f /bin/systemctl ]; then + # Not running systemd!? Don't check! 
+ return 0 + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} + +# +# End of SUSE Enterprise General Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Gentoo Install Functions. +# +__autounmask() { + # Unmask package(s) and accept changes + # + # Usually it's a good thing to have config files protected by portage, but + # in this case this would require to interrupt the bootstrapping script at + # this point, manually merge the changes using etc-update/dispatch-conf/ + # cfg-update and then restart the bootstrapping script, so instead we allow + # at this point to modify certain config files directly + export CONFIG_PROTECT_MASK="${CONFIG_PROTECT_MASK:-} + /etc/portage/package.accept_keywords + /etc/portage/package.keywords + /etc/portage/package.license + /etc/portage/package.unmask + /etc/portage/package.use" + emerge --autounmask --autounmask-continue --autounmask-only --autounmask-write "${@}"; return $? +} + +__emerge() { + EMERGE_FLAGS='-q' + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + EMERGE_FLAGS='-v' + fi + + # Do not re-emerge packages that are already installed + EMERGE_FLAGS="${EMERGE_FLAGS} --noreplace" + + if [ "$_GENTOO_USE_BINHOST" -eq $BS_TRUE ]; then + EMERGE_FLAGS="${EMERGE_FLAGS} --getbinpkg" + fi + + # shellcheck disable=SC2086 + emerge ${EMERGE_FLAGS} "${@}"; return $? 
+} + +__gentoo_pre_dep() { + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + if __check_command_exists eix; then + eix-sync + else + emerge --sync + fi + else + if __check_command_exists eix; then + eix-sync -q + else + emerge --sync --quiet + fi + fi + if [ ! -d /etc/portage ]; then + mkdir /etc/portage + fi + + # Enable Python 3.6 target for pre Neon Salt release + if echo "${STABLE_REV}" | grep -q "2019" || [ "${ITYPE}" = "git" ] && [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + EXTRA_PYTHON_TARGET=python3_6 + fi + + # Enable Python 3.7 target for Salt Neon using GIT + if [ "${ITYPE}" = "git" ] && [ "${GIT_REV}" = "v3000" ]; then + EXTRA_PYTHON_TARGET=python3_7 + fi + + if [ -n "${EXTRA_PYTHON_TARGET:-}" ]; then + if ! emerge --info | sed 's/.*\(PYTHON_TARGETS="[^"]*"\).*/\1/' | grep -q "${EXTRA_PYTHON_TARGET}" ; then + echo "PYTHON_TARGETS=\"\${PYTHON_TARGETS} ${EXTRA_PYTHON_TARGET}\"" >> /etc/portage/make.conf + emerge --deep --with-bdeps=y --newuse --quiet @world + fi + fi +} + +__gentoo_post_dep() { + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __autounmask ${_EXTRA_PACKAGES} || return 1 + # shellcheck disable=SC2086 + __emerge ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + +install_gentoo_deps() { + __gentoo_pre_dep || return 1 + + # Make sure that the 'libcloud' use flag is set when Salt Cloud support is requested + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + SALT_USE_FILE='/etc/portage/package.use' + if [ -d '/etc/portage/package.use' ]; then + SALT_USE_FILE='/etc/portage/package.use/salt' + fi + + SALT_USE_FLAGS="$(grep -E '^[<>=~]*app-admin/salt.*' ${SALT_USE_FILE} 2>/dev/null)" + SALT_USE_FLAG_LIBCLOUD="$(echo "${SALT_USE_FLAGS}" | grep ' libcloud' 2>/dev/null)" + + # Set the libcloud use flag, if it is not set yet + if [ -z "${SALT_USE_FLAGS}" ]; then + echo "app-admin/salt libcloud" >> ${SALT_USE_FILE} + elif [ -z 
"${SALT_USE_FLAG_LIBCLOUD}" ]; then + sed 's#^\([<>=~]*app-admin/salt[^ ]*\)\(.*\)#\1 libcloud\2#g' -i ${SALT_USE_FILE} + fi + fi + + __gentoo_post_dep || return 1 +} + +install_gentoo_git_deps() { + __gentoo_pre_dep || return 1 + + # Install pip if it does not exist + if ! __check_command_exists pip ; then + GENTOO_GIT_PACKAGES="${GENTOO_GIT_PACKAGES:-} dev-python/pip" + fi + + # Install GIT if it does not exist + if ! __check_command_exists git ; then + GENTOO_GIT_PACKAGES="${GENTOO_GIT_PACKAGES:-} dev-vcs/git" + fi + + # Salt <3000 does not automatically install dependencies. It has to be done manually. + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + GENTOO_GIT_PACKAGES="${GENTOO_GIT_PACKAGES:-} + sys-apps/pciutils + dev-python/pyyaml + dev-python/pyzmq + dev-python/libnacl + dev-python/pycryptodome + dev-python/py + dev-python/requests + /dev/null 2>&1 || ( + systemctl preset salt-$fname.service > /dev/null 2>&1 && + systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + else + # Salt minion cannot start in a docker container because the "net" service is not available + if [ $fname = "minion" ] && [ -f /.dockerenv ]; then + sed '/need net/d' -i /etc/init.d/salt-$fname + fi + + rc-update add "salt-$fname" > /dev/null 2>&1 || return 1 + fi + done +} + +install_gentoo_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! 
__check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if __check_command_exists systemctl ; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( + systemctl preset salt-$fname.service > /dev/null 2>&1 && + systemctl enable salt-$fname.service > /dev/null 2>&1 + ) + else + cat <<_eof > "/etc/init.d/salt-${fname}" +#!/sbin/openrc-run +# Copyright 1999-2015 Gentoo Foundation +# Distributed under the terms of the GNU General Public License v2 + +command="/usr/bin/salt-${fname}" +command_args="\${SALT_OPTS}" +command_background="1" +pidfile="/var/run/salt-${fname}.pid" +name="SALT ${fname} daemon" +retry="20" + +depend() { + use net logger +} +_eof + chmod +x /etc/init.d/salt-$fname + + cat <<_eof > "/etc/conf.d/salt-${fname}" +# /etc/conf.d/salt-${fname}: config file for /etc/init.d/salt-master + +# see man pages for salt-${fname} or run 'salt-${fname} --help' +# for valid cmdline options +SALT_OPTS="--log-level=warning" +_eof + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + rc-update add "salt-$fname" > /dev/null 2>&1 || return 1 + fi + done + + return 0 +} + +install_gentoo_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + # Ensure upstart configs / systemd units are loaded + if __check_command_exists systemctl ; then + systemctl daemon-reload + fi + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = 
"api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if __check_command_exists systemctl ; then + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + else + # Disable stdin to fix shell session hang on killing tee pipe + rc-service salt-$fname stop < /dev/null > /dev/null 2>&1 + rc-service salt-$fname start < /dev/null || return 1 + fi + done + + return 0 +} + +install_gentoo_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if __check_command_exists systemctl ; then + __check_services_systemd salt-$fname || return 1 + else + __check_services_openrc salt-$fname || return 1 + fi + done + + return 0 +} +# +# End of Gentoo Install Functions. 
+# +####################################################################################################################### + +####################################################################################################################### +# +# VoidLinux Install Functions +# +install_voidlinux_stable_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + xbps-install -Suy || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + xbps-install -Suy "${_EXTRA_PACKAGES}" || return 1 + fi + + return 0 +} + +install_voidlinux_stable() { + xbps-install -Suy salt || return 1 + return 0 +} + +install_voidlinux_stable_post() { + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + ln -s /etc/sv/salt-$fname /var/service/. 
+ done +} + +install_voidlinux_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + sv restart salt-$fname + done +} + +install_voidlinux_check_services() { + for fname in master minion syndic; do + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + [ -e /var/service/salt-$fname ] || return 1 + done + + return 0 +} + +daemons_running_voidlinux() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + FAILED_DAEMONS=0 + for fname in master minion syndic; do + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ "$(sv status salt-$fname | grep run)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} +# +# Ended VoidLinux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# OS X / Darwin Install Functions +# + +__macosx_get_packagesite() { + DARWIN_ARCH="x86_64" + + __PY_VERSION_REPO="py2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + PKG="salt-${STABLE_REV}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" + SALTPKGCONFURL="https://repo.saltproject.io/osx/${PKG}" +} + +# Using a 
separate conf step to head for idempotent install... +__configure_macosx_pkg_details() { + __macosx_get_packagesite || return 1 + return 0 +} + +install_macosx_stable_deps() { + __configure_macosx_pkg_details || return 1 + return 0 +} + +install_macosx_git_deps() { + install_macosx_stable_deps || return 1 + + if ! echo "$PATH" | grep -q /usr/local/bin; then + echowarn "/usr/local/bin was not found in \$PATH. Adding it for the duration of the script execution." + export PATH=/usr/local/bin:$PATH + fi + + __fetch_url "/tmp/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1 + + if [ -n "$_PY_EXE" ]; then + _PYEXE=${_PY_EXE} + else + _PYEXE=python2.7 + fi + + # Install PIP + $_PYEXE /tmp/get-pip.py || return 1 + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + return 0 + fi + + __PIP_REQUIREMENTS="dev_python27.txt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PIP_REQUIREMENTS="dev_python34.txt" + fi + + requirements_file="${_SALT_GIT_CHECKOUT_DIR}/requirements/${__PIP_REQUIREMENTS}" + pip install -U -r "${requirements_file}" --install-option="--prefix=/opt/salt" || return 1 + + return 0 +} + +install_macosx_stable() { + install_macosx_stable_deps || return 1 + + __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1 + + /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1 + + return 0 +} + +install_macosx_git() { + + if [ -n "$_PY_EXE" ]; then + _PYEXE=${_PY_EXE} + else + _PYEXE=python2.7 + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then + __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 + return 0 + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/opt/salt || return 1 + else + $_PYEXE setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/opt/salt || return 1 + fi + + return 0 +} + +install_macosx_stable_post() 
{ + if [ ! -f /etc/paths.d/salt ]; then + print "%s\n" "/opt/salt/bin" "/usr/local/sbin" > /etc/paths.d/salt + fi + + # Don'f fail because of unknown variable on the next step + set +o nounset + # shellcheck disable=SC1091 + . /etc/profile + # Revert nounset to it's previous state + set -o nounset + + return 0 +} + +install_macosx_git_post() { + install_macosx_stable_post || return 1 + return 0 +} + +install_macosx_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + + return 0 +} +# +# Ended OS X / Darwin Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Default minion configuration function. Matches ANY distribution as long as +# the -c options is passed. +# +config_salt() { + # If the configuration directory is not passed, return + [ "$_TEMP_CONFIG_DIR" = "null" ] && return + + if [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then + echowarn "Passing -C (config only) option implies -F (forced overwrite)." + + if [ "$_FORCE_OVERWRITE" -ne $BS_TRUE ]; then + echowarn "Overwriting configs in 11 seconds!" 
+ sleep 11 + _FORCE_OVERWRITE=$BS_TRUE + fi + fi + + # Let's create the necessary directories + [ -d "$_SALT_ETC_DIR" ] || mkdir "$_SALT_ETC_DIR" || return 1 + [ -d "$_PKI_DIR" ] || (mkdir -p "$_PKI_DIR" && chmod 700 "$_PKI_DIR") || return 1 + + # If -C or -F was passed, we don't need a .bak file for the config we're updating + # This is used in the custom master/minion config file checks below + CREATE_BAK=$BS_TRUE + if [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + CREATE_BAK=$BS_FALSE + fi + + CONFIGURED_ANYTHING=$BS_FALSE + + # Copy the grains file if found + if [ -f "$_TEMP_CONFIG_DIR/grains" ]; then + echodebug "Moving provided grains file from $_TEMP_CONFIG_DIR/grains to $_SALT_ETC_DIR/grains" + __movefile "$_TEMP_CONFIG_DIR/grains" "$_SALT_ETC_DIR/grains" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + if [ "$_INSTALL_MINION" -eq $BS_TRUE ] || \ + [ "$_CONFIG_ONLY" -eq $BS_TRUE ] || [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + # Create the PKI directory + [ -d "$_PKI_DIR/minion" ] || (mkdir -p "$_PKI_DIR/minion" && chmod 700 "$_PKI_DIR/minion") || return 1 + + # Check to see if a custom minion config json dict was provided + if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + + # Check if a minion config file already exists and move to .bak if needed + if [ -f "$_SALT_ETC_DIR/minion" ] && [ "$CREATE_BAK" -eq "$BS_TRUE" ]; then + __movefile "$_SALT_ETC_DIR/minion" "$_SALT_ETC_DIR/minion.bak" $BS_TRUE || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Overwrite/create the config file with the yaml string + __overwriteconfig "$_SALT_ETC_DIR/minion" "$_CUSTOM_MINION_CONFIG" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + + # Copy the minions configuration if found + # Explicitly check for custom master config to avoid moving the minion config + elif [ -f "$_TEMP_CONFIG_DIR/minion" ] && [ "$_CUSTOM_MASTER_CONFIG" = "null" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion" "$_SALT_ETC_DIR" "$_FORCE_OVERWRITE" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # 
Copy the minion's keys if found + if [ -f "$_TEMP_CONFIG_DIR/minion.pem" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion.pem" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 + chmod 400 "$_PKI_DIR/minion/minion.pem" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + if [ -f "$_TEMP_CONFIG_DIR/minion.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/minion.pub" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 + chmod 664 "$_PKI_DIR/minion/minion.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + # For multi-master-pki, copy the master_sign public key if found + if [ -f "$_TEMP_CONFIG_DIR/master_sign.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/master_sign.pub" "$_PKI_DIR/minion/" || return 1 + chmod 664 "$_PKI_DIR/minion/master_sign.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + fi + + # only (re)place master or syndic configs if -M (install master) or -S + # (install syndic) specified + OVERWRITE_MASTER_CONFIGS=$BS_FALSE + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then + OVERWRITE_MASTER_CONFIGS=$BS_TRUE + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then + OVERWRITE_MASTER_CONFIGS=$BS_TRUE + fi + + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ] || [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ] || [ "$OVERWRITE_MASTER_CONFIGS" -eq $BS_TRUE ] || [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + # Create the PKI directory + [ -d "$_PKI_DIR/master" ] || (mkdir -p "$_PKI_DIR/master" && chmod 700 "$_PKI_DIR/master") || return 1 + + # Check to see if a custom master config json dict was provided + if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then + + # Check if a master config file already exists and move to .bak if needed + if [ -f "$_SALT_ETC_DIR/master" ] && [ "$CREATE_BAK" -eq "$BS_TRUE" ]; then + __movefile "$_SALT_ETC_DIR/master" "$_SALT_ETC_DIR/master.bak" $BS_TRUE || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Overwrite/create the config file with the yaml string + __overwriteconfig 
"$_SALT_ETC_DIR/master" "$_CUSTOM_MASTER_CONFIG" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + + # Copy the masters configuration if found + elif [ -f "$_TEMP_CONFIG_DIR/master" ]; then + __movefile "$_TEMP_CONFIG_DIR/master" "$_SALT_ETC_DIR" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + + # Copy the master's keys if found + if [ -f "$_TEMP_CONFIG_DIR/master.pem" ]; then + __movefile "$_TEMP_CONFIG_DIR/master.pem" "$_PKI_DIR/master/" || return 1 + chmod 400 "$_PKI_DIR/master/master.pem" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + if [ -f "$_TEMP_CONFIG_DIR/master.pub" ]; then + __movefile "$_TEMP_CONFIG_DIR/master.pub" "$_PKI_DIR/master/" || return 1 + chmod 664 "$_PKI_DIR/master/master.pub" || return 1 + CONFIGURED_ANYTHING=$BS_TRUE + fi + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + # Recursively copy salt-cloud configs with overwriting if necessary + for file in "$_TEMP_CONFIG_DIR"/cloud*; do + if [ -f "$file" ]; then + __copyfile "$file" "$_SALT_ETC_DIR" || return 1 + elif [ -d "$file" ]; then + subdir="$(basename "$file")" + mkdir -p "$_SALT_ETC_DIR/$subdir" + for file_d in "$_TEMP_CONFIG_DIR/$subdir"/*; do + if [ -f "$file_d" ]; then + __copyfile "$file_d" "$_SALT_ETC_DIR/$subdir" || return 1 + fi + done + fi + done + fi + + if [ "$_CONFIG_ONLY" -eq $BS_TRUE ] && [ $CONFIGURED_ANYTHING -eq $BS_FALSE ]; then + echowarn "No configuration or keys were copied over. No configuration was done!" + exit 0 + fi + + return 0 +} +# +# Ended Default Configuration function +# +####################################################################################################################### + +####################################################################################################################### +# +# Default salt master minion keys pre-seed function. Matches ANY distribution +# as long as the -k option is passed. 
+# +preseed_master() { + # Create the PKI directory + + if [ "$(find "$_TEMP_KEYS_DIR" -maxdepth 1 -type f | wc -l)" -lt 1 ]; then + echoerror "No minion keys were uploaded. Unable to pre-seed master" + return 1 + fi + + SEED_DEST="$_PKI_DIR/master/minions" + [ -d "$SEED_DEST" ] || (mkdir -p "$SEED_DEST" && chmod 700 "$SEED_DEST") || return 1 + + for keyfile in "$_TEMP_KEYS_DIR"/*; do + keyfile=$(basename "${keyfile}") + src_keyfile="${_TEMP_KEYS_DIR}/${keyfile}" + dst_keyfile="${SEED_DEST}/${keyfile}" + + # If it's not a file, skip to the next + [ ! -f "$src_keyfile" ] && continue + + __movefile "$src_keyfile" "$dst_keyfile" || return 1 + chmod 664 "$dst_keyfile" || return 1 + done + + return 0 +} +# +# Ended Default Salt Master Pre-Seed minion keys function +# +####################################################################################################################### + +####################################################################################################################### +# +# This function checks if all of the installed daemons are running or not. 
+# +daemons_running() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + FAILED_DAEMONS=0 + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # shellcheck disable=SC2009 + if [ "${DISTRO_NAME}" = "SmartOS" ]; then + if [ "$(svcs -Ho STA salt-$fname)" != "ON" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + elif [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then + echoerror "salt-$fname was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} +# +# Ended daemons running check function +# +####################################################################################################################### + +#====================================================================================================================== +# LET'S PROCEED WITH OUR INSTALLATION +#====================================================================================================================== + +# Let's get the dependencies install function +DEP_FUNC_NAMES="" +if [ ${_NO_DEPS} -eq $BS_FALSE ]; then + DEP_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES 
install_${DISTRO_NAME_L}_${ITYPE}_deps" + DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}_deps" +fi + +DEPS_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$DEP_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + DEPS_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "DEPS_INSTALL_FUNC=${DEPS_INSTALL_FUNC}" + +# Let's get the Salt config function +CONFIG_FUNC_NAMES="config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_${ITYPE}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_salt" +CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_salt" + +CONFIG_SALT_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$CONFIG_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + CONFIG_SALT_FUNC="$FUNC_NAME" + break + fi +done +echodebug "CONFIG_SALT_FUNC=${CONFIG_SALT_FUNC}" + +# Let's get the pre-seed master function +PRESEED_FUNC_NAMES="preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_${ITYPE}_master" +PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_master" 
+PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_master" + +PRESEED_MASTER_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$PRESEED_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + PRESEED_MASTER_FUNC="$FUNC_NAME" + break + fi +done +echodebug "PRESEED_MASTER_FUNC=${PRESEED_MASTER_FUNC}" + +# Let's get the install function +INSTALL_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" +INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" +INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}" + +INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$INSTALL_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "INSTALL_FUNC=${INSTALL_FUNC}" + +# Let's get the post install function +POST_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_post" +POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_post" + +POST_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$POST_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + POST_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "POST_INSTALL_FUNC=${POST_INSTALL_FUNC}" + +# Let's get the start daemons install function +STARTDAEMONS_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES 
install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_restart_daemons" +STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_restart_daemons" + +STARTDAEMONS_INSTALL_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$STARTDAEMONS_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + STARTDAEMONS_INSTALL_FUNC="$FUNC_NAME" + break + fi +done +echodebug "STARTDAEMONS_INSTALL_FUNC=${STARTDAEMONS_INSTALL_FUNC}" + +# Let's get the daemons running check function. +DAEMONS_RUNNING_FUNC_NAMES="daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}_${ITYPE}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running" + +DAEMONS_RUNNING_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$DAEMONS_RUNNING_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + DAEMONS_RUNNING_FUNC="$FUNC_NAME" + break + fi +done +echodebug "DAEMONS_RUNNING_FUNC=${DAEMONS_RUNNING_FUNC}" + +# 
Let's get the check services function +if [ ${_DISABLE_SALT_CHECKS} -eq $BS_FALSE ]; then + CHECK_SERVICES_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_check_services" + CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_check_services" +else + CHECK_SERVICES_FUNC_NAMES="" +fi + +CHECK_SERVICES_FUNC="null" +for FUNC_NAME in $(__strip_duplicates "$CHECK_SERVICES_FUNC_NAMES"); do + if __function_defined "$FUNC_NAME"; then + CHECK_SERVICES_FUNC="$FUNC_NAME" + break + fi +done +echodebug "CHECK_SERVICES_FUNC=${CHECK_SERVICES_FUNC}" + +if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ "$DEPS_INSTALL_FUNC" = "null" ]; then + echoerror "No dependencies installation function found. Exiting..." + exit 1 +fi + +if [ "$INSTALL_FUNC" = "null" ]; then + echoerror "No installation function found. Exiting..." + exit 1 +fi + + +# Install dependencies +if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_FALSE ]; then + # Only execute function is not in config mode only + echoinfo "Running ${DEPS_INSTALL_FUNC}()" + if ! ${DEPS_INSTALL_FUNC}; then + echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!" + exit 1 + fi +fi + + +if [ "${ITYPE}" = "git" ] && [ ${_NO_DEPS} -eq ${BS_TRUE} ]; then + if ! __git_clone_and_checkout; then + echo "Failed to clone and checkout git repository." 
+ exit 1 + fi +fi + + +# Triggering config_salt() if overwriting master or minion configs +if [ "$_CUSTOM_MASTER_CONFIG" != "null" ] || [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="$_SALT_ETC_DIR" + fi + + if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_TRUE ]; then + # Execute function to satisfy dependencies for configuration step + echoinfo "Running ${DEPS_INSTALL_FUNC}()" + if ! ${DEPS_INSTALL_FUNC}; then + echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!" + exit 1 + fi + fi +fi + +# Configure Salt +if [ "$CONFIG_SALT_FUNC" != "null" ] && [ "$_TEMP_CONFIG_DIR" != "null" ]; then + echoinfo "Running ${CONFIG_SALT_FUNC}()" + if ! ${CONFIG_SALT_FUNC}; then + echoerror "Failed to run ${CONFIG_SALT_FUNC}()!!!" + exit 1 + fi +fi + +# Drop the master address if passed +if [ "$_SALT_MASTER_ADDRESS" != "null" ]; then + [ ! -d "$_SALT_ETC_DIR/minion.d" ] && mkdir -p "$_SALT_ETC_DIR/minion.d" + cat <<_eof > "$_SALT_ETC_DIR/minion.d/99-master-address.conf" +master: $_SALT_MASTER_ADDRESS +_eof +fi + +# Drop the minion id if passed +if [ "$_SALT_MINION_ID" != "null" ]; then + [ ! -d "$_SALT_ETC_DIR" ] && mkdir -p "$_SALT_ETC_DIR" + echo "$_SALT_MINION_ID" > "$_SALT_ETC_DIR/minion_id" +fi + +# Pre-seed master keys +if [ "$PRESEED_MASTER_FUNC" != "null" ] && [ "$_TEMP_KEYS_DIR" != "null" ]; then + echoinfo "Running ${PRESEED_MASTER_FUNC}()" + if ! ${PRESEED_MASTER_FUNC}; then + echoerror "Failed to run ${PRESEED_MASTER_FUNC}()!!!" + exit 1 + fi +fi + +# Install Salt +if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + # Only execute function is not in config mode only + echoinfo "Running ${INSTALL_FUNC}()" + if ! ${INSTALL_FUNC}; then + echoerror "Failed to run ${INSTALL_FUNC}()!!!" + exit 1 + fi +fi + +# Run any post install function. 
Only execute function if not in config mode only +if [ "$POST_INSTALL_FUNC" != "null" ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Running ${POST_INSTALL_FUNC}()" + if ! ${POST_INSTALL_FUNC}; then + echoerror "Failed to run ${POST_INSTALL_FUNC}()!!!" + exit 1 + fi +fi + +# Run any check services function, Only execute function if not in config mode only +if [ "$CHECK_SERVICES_FUNC" != "null" ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Running ${CHECK_SERVICES_FUNC}()" + if ! ${CHECK_SERVICES_FUNC}; then + echoerror "Failed to run ${CHECK_SERVICES_FUNC}()!!!" + exit 1 + fi +fi + +# Run any start daemons function +if [ "$STARTDAEMONS_INSTALL_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; then + echoinfo "Running ${STARTDAEMONS_INSTALL_FUNC}()" + echodebug "Waiting ${_SLEEP} seconds for processes to settle before checking for them" + sleep ${_SLEEP} + if ! ${STARTDAEMONS_INSTALL_FUNC}; then + echoerror "Failed to run ${STARTDAEMONS_INSTALL_FUNC}()!!!" + exit 1 + fi +fi + +# Check if the installed daemons are running or not +if [ "$DAEMONS_RUNNING_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; then + echoinfo "Running ${DAEMONS_RUNNING_FUNC}()" + echodebug "Waiting ${_SLEEP} seconds for processes to settle before checking for them" + sleep ${_SLEEP} # Sleep a little bit to let daemons start + if ! ${DAEMONS_RUNNING_FUNC}; then + echoerror "Failed to run ${DAEMONS_RUNNING_FUNC}()!!!" + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ "$_ECHO_DEBUG" -eq $BS_FALSE ]; then + echoerror "salt-$fname was not found running. 
Pass '-D' to ${__ScriptName} when bootstrapping for additional debugging information..." + continue + fi + + [ ! -f "$_SALT_ETC_DIR/$fname" ] && [ $fname != "syndic" ] && echodebug "$_SALT_ETC_DIR/$fname does not exist" + + echodebug "Running salt-$fname by hand outputs: $(nohup salt-$fname -l debug)" + + [ ! -f /var/log/salt/$fname ] && echodebug "/var/log/salt/$fname does not exist. Can't cat its contents!" && continue + + echodebug "DAEMON LOGS for $fname:" + echodebug "$(cat /var/log/salt/$fname)" + echo + done + + echodebug "Running Processes:" + echodebug "$(ps auxwww)" + + exit 1 + fi +fi + +# Done! +if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then + echoinfo "Salt installed!" +else + echoinfo "Salt configured!" +fi + +exit 0 + +# vim: set sts=4 ts=4 et From a671ac387a39a49d384323239692df3857478596 Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Thu, 12 Oct 2023 09:45:20 -0400 Subject: [PATCH 183/417] Add hotfix changes --- HOTFIX | 2 +- salt/manager/tools/sbin/soup | 11 +++++++---- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/HOTFIX b/HOTFIX index d3f5a12fa..afd2e4c40 100644 --- a/HOTFIX +++ b/HOTFIX @@ -1 +1 @@ - +20231012 diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 960c50f31..fa714cda4 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -691,13 +691,16 @@ verify_latest_update_script() { # Keeping this block in case we need to do a hotfix that requires salt update apply_hotfix() { -# if [[ "$INSTALLEDVERSION" == "2.3.90" ]] ; then -# fix_wazuh + if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then + salt-call state.appply elastic-fleet -l info queue=True + . /usr/sbin/so-elastic-fleet-common + elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints + /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend # elif [[ "$INSTALLEDVERSION" == "2.3.110" ]] ; then # 2_3_10_hotfix_1 -# else + else echo "No actions required. 
($INSTALLEDVERSION/$HOTFIXVERSION)" -# fi + fi } From 967138cdff029ff5d0be3dac016be1cc2e7d9b13 Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Thu, 12 Oct 2023 10:54:26 -0400 Subject: [PATCH 184/417] Apply state correctly --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fa714cda4..d128a7c4d 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -692,7 +692,7 @@ verify_latest_update_script() { # Keeping this block in case we need to do a hotfix that requires salt update apply_hotfix() { if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then - salt-call state.appply elastic-fleet -l info queue=True + salt-call state.apply elastic-fleet -l info queue=True . /usr/sbin/so-elastic-fleet-common elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend From 98eab906aff0d269bc92ed02c721afa02408a3ac Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Thu, 12 Oct 2023 11:00:24 -0400 Subject: [PATCH 185/417] Apply named state --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index d128a7c4d..375d48209 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -692,7 +692,7 @@ verify_latest_update_script() { # Keeping this block in case we need to do a hotfix that requires salt update apply_hotfix() { if [[ "$INSTALLEDVERSION" == "2.4.20" ]] ; then - salt-call state.apply elastic-fleet -l info queue=True + salt-call state.apply elasticfleet -l info queue=True . 
/usr/sbin/so-elastic-fleet-common elastic_fleet_integration_remove endpoints-initial elastic-defend-endpoints /usr/sbin/so-elastic-fleet-integration-policy-elastic-defend From 8dc163f0748b22a2cf521208f006712e872797c7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 12 Oct 2023 13:09:07 -0400 Subject: [PATCH 186/417] use script from develop branch --- salt/salt/scripts/bootstrap-salt.sh | 25 ++++++++++++++++++------- salt/salt/scripts/bs_dev | 0 salt/salt/scripts/bs_stable | 0 3 files changed, 18 insertions(+), 7 deletions(-) create mode 100644 salt/salt/scripts/bs_dev create mode 100644 salt/salt/scripts/bs_stable diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index ace3bce26..422e9395f 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -596,6 +596,7 @@ fi echoinfo "Running version: ${__ScriptVersion}" echoinfo "Executed by: ${CALLER}" echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" +echowarn "Running the unstable version of ${__ScriptName}" # Define installation type if [ "$#" -gt 0 ];then @@ -1523,7 +1524,7 @@ __check_dpkg_architecture() { else # Saltstack official repository has arm64 metadata beginning with Debian 11, # use amd64 repositories on arm64 for anything older, since all pkgs are arch-independent - if [ "$DISTRO_NAME_L" = "debian" ] || [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then + if [ "$DISTRO_NAME_L" = "debian" ] && [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then __REPO_ARCH="amd64" else __REPO_ARCH="arm64" @@ -1709,6 +1710,14 @@ __debian_codename_translation() { "11") DISTRO_CODENAME="bullseye" ;; + "12") + DISTRO_CODENAME="bookworm" + # FIXME - TEMPORARY + # use bullseye packages until bookworm packages are available + DISTRO_CODENAME="bullseye" + DISTRO_MAJOR_VERSION=11 + rv=11 + ;; *) DISTRO_CODENAME="stretch" ;; @@ -2196,7 +2205,7 @@ __dnf_install_noinput() { #--- FUNCTION 
------------------------------------------------------------------------------------------------------- # NAME: __tdnf_install_noinput -# DESCRIPTION: (DRY) dnf install with noinput options +# DESCRIPTION: (DRY) tdnf install with noinput options #---------------------------------------------------------------------------------------------------------------------- __tdnf_install_noinput() { @@ -7033,15 +7042,17 @@ install_photon_git_deps() { "${__python}" -m pip install "${dep}" || return 1 done else - __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc glibc-devel linux-devel.x86_64" # shellcheck disable=SC2086 __tdnf_install_noinput ${__PACKAGES} || return 1 fi - # Need newer version of setuptools on Photon - _setuptools_dep="setuptools>=${_MINIMUM_SETUPTOOLS_VERSION}" - echodebug "Running '${_PY_EXE} -m pip --upgrade install ${_setuptools_dep}'" - ${_PY_EXE} -m pip install --upgrade "${_setuptools_dep}" + if [ "${DISTRO_MAJOR_VERSION}" -gt 3 ]; then + # Need newer version of setuptools on Photon + _setuptools_dep="setuptools>=${_MINIMUM_SETUPTOOLS_VERSION}" + echodebug "Running '${_PY_EXE} -m pip --upgrade install ${_setuptools_dep}'" + ${_PY_EXE} -m pip install --upgrade "${_setuptools_dep}" + fi # Let's trigger config_salt() if [ "$_TEMP_CONFIG_DIR" = "null" ]; then diff --git a/salt/salt/scripts/bs_dev b/salt/salt/scripts/bs_dev new file mode 100644 index 000000000..e69de29bb diff --git a/salt/salt/scripts/bs_stable b/salt/salt/scripts/bs_stable new file mode 100644 index 000000000..e69de29bb From 17ae9b33498c6d8ebaf4b503044a5c892ca77ace Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 12 Oct 2023 13:54:07 -0400 Subject: [PATCH 187/417] avoid reboot during testing --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index e35dde579..17c62af81 100755 
--- a/setup/so-setup +++ b/setup/so-setup @@ -91,7 +91,7 @@ fi # if packages are updated and the box isn't rebooted if [[ $is_debian ]]; then update_packages - if [[ -f "/var/run/reboot-required" ]]; then + if [[ -f "/var/run/reboot-required" ]] && [ -z "$TESTING" ]; then whiptail_debian_reboot_required reboot fi From 6dd06c0fe94b55dd95c6314989a9a2ff87f6deb5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 12 Oct 2023 15:07:47 -0400 Subject: [PATCH 188/417] change install_centos_onedir to install version provided from command line --- salt/salt/scripts/bootstrap-salt.sh | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index 422e9395f..464243c84 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -5126,17 +5126,30 @@ install_centos_onedir_deps() { install_centos_onedir() { __PACKAGES="" + local cloud='salt-cloud' + local master='salt-master' + local minion='salt-minion' + local syndic='salt-syndic' + local ver="$_ONEDIR_REV" + + if [ ! 
-z $ver ]; then + cloud+="-$ver" + master+="-$ver" + minion+="-$ver" + syndic+="-$ver" + fi + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" + __PACKAGES="${__PACKAGES} $cloud" fi if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-master" + __PACKAGES="${__PACKAGES} $master" fi if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" + __PACKAGES="${__PACKAGES} $minion" fi if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-syndic" + __PACKAGES="${__PACKAGES} $syndic" fi # shellcheck disable=SC2086 From b12c4a96e95b0a353c42a2c0b8c0a33f7e3e3f78 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 12 Oct 2023 15:11:25 -0400 Subject: [PATCH 189/417] remove files --- salt/salt/scripts/bootstrap-salt_orig.sh | 8126 ---------------------- salt/salt/scripts/bs_dev | 0 salt/salt/scripts/bs_stable | 0 3 files changed, 8126 deletions(-) delete mode 100644 salt/salt/scripts/bootstrap-salt_orig.sh delete mode 100644 salt/salt/scripts/bs_dev delete mode 100644 salt/salt/scripts/bs_stable diff --git a/salt/salt/scripts/bootstrap-salt_orig.sh b/salt/salt/scripts/bootstrap-salt_orig.sh deleted file mode 100644 index 47d25949c..000000000 --- a/salt/salt/scripts/bootstrap-salt_orig.sh +++ /dev/null @@ -1,8126 +0,0 @@ -#!/bin/sh - - -# WARNING: Changes to this file in the salt repo will be overwritten! 
-# Please submit pull requests against the salt-bootstrap repo: -# https://github.com/saltstack/salt-bootstrap - -#====================================================================================================================== -# vim: softtabstop=4 shiftwidth=4 expandtab fenc=utf-8 spell spelllang=en cc=120 -#====================================================================================================================== -# -# FILE: bootstrap-salt.sh -# -# DESCRIPTION: Bootstrap Salt installation for various systems/distributions -# -# BUGS: https://github.com/saltstack/salt-bootstrap/issues -# -# COPYRIGHT: (c) 2012-2021 by the SaltStack Team, see AUTHORS.rst for more -# details. -# -# LICENSE: Apache 2.0 -# ORGANIZATION: SaltStack (saltproject.io) -# CREATED: 10/15/2012 09:49:37 PM WEST -#====================================================================================================================== -set -o nounset # Treat unset variables as an error - -__ScriptVersion="2021.09.17" -__ScriptName="bootstrap-salt.sh" - -__ScriptFullName="$0" -__ScriptArgs="$*" - -#====================================================================================================================== -# Environment variables taken into account. 
-#---------------------------------------------------------------------------------------------------------------------- -# * BS_COLORS: If 0 disables colour support -# * BS_PIP_ALLOWED: If 1 enable pip based installations(if needed) -# * BS_PIP_ALL: If 1 enable all python packages to be installed via pip instead of apt, requires setting virtualenv -# * BS_VIRTUALENV_DIR: The virtualenv to install salt into (shouldn't exist yet) -# * BS_ECHO_DEBUG: If 1 enable debug echo which can also be set by -D -# * BS_SALT_ETC_DIR: Defaults to /etc/salt (Only tweak'able on git based installations) -# * BS_SALT_CACHE_DIR: Defaults to /var/cache/salt (Only tweak'able on git based installations) -# * BS_KEEP_TEMP_FILES: If 1, don't move temporary files, instead copy them -# * BS_FORCE_OVERWRITE: Force overriding copied files(config, init.d, etc) -# * BS_UPGRADE_SYS: If 1 and an option, upgrade system. Default 0. -# * BS_GENTOO_USE_BINHOST: If 1 add `--getbinpkg` to gentoo's emerge -# * BS_SALT_MASTER_ADDRESS: The IP or DNS name of the salt-master the minion should connect to -# * BS_SALT_GIT_CHECKOUT_DIR: The directory where to clone Salt on git installations -#====================================================================================================================== - - -# Bootstrap script truth values -BS_TRUE=1 -BS_FALSE=0 - -# Default sleep time used when waiting for daemons to start, restart and checking for these running -__DEFAULT_SLEEP=3 - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __detect_color_support -# DESCRIPTION: Try to detect color support. -#---------------------------------------------------------------------------------------------------------------------- -_COLORS=${BS_COLORS:-$(tput colors 2>/dev/null || echo 0)} -__detect_color_support() { - # shellcheck disable=SC2181 - if [ $? 
-eq 0 ] && [ "$_COLORS" -gt 2 ]; then - RC='\033[1;31m' - GC='\033[1;32m' - BC='\033[1;34m' - YC='\033[1;33m' - EC='\033[0m' - else - RC="" - GC="" - BC="" - YC="" - EC="" - fi -} -__detect_color_support - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: echoerr -# DESCRIPTION: Echo errors to stderr. -#---------------------------------------------------------------------------------------------------------------------- -echoerror() { - printf "${RC} * ERROR${EC}: %s\\n" "$@" 1>&2; -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: echoinfo -# DESCRIPTION: Echo information to stdout. -#---------------------------------------------------------------------------------------------------------------------- -echoinfo() { - printf "${GC} * INFO${EC}: %s\\n" "$@"; -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: echowarn -# DESCRIPTION: Echo warning information to stdout. -#---------------------------------------------------------------------------------------------------------------------- -echowarn() { - printf "${YC} * WARN${EC}: %s\\n" "$@"; -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: echodebug -# DESCRIPTION: Echo debug information to stdout. -#---------------------------------------------------------------------------------------------------------------------- -echodebug() { - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - printf "${BC} * DEBUG${EC}: %s\\n" "$@"; - fi -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_command_exists -# DESCRIPTION: Check if a command exists. 
-#---------------------------------------------------------------------------------------------------------------------- -__check_command_exists() { - command -v "$1" > /dev/null 2>&1 -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_pip_allowed -# DESCRIPTION: Simple function to let the users know that -P needs to be used. -#---------------------------------------------------------------------------------------------------------------------- -__check_pip_allowed() { - if [ $# -eq 1 ]; then - _PIP_ALLOWED_ERROR_MSG=$1 - else - _PIP_ALLOWED_ERROR_MSG="pip based installations were not allowed. Retry using '-P'" - fi - - if [ "$_PIP_ALLOWED" -eq $BS_FALSE ]; then - echoerror "$_PIP_ALLOWED_ERROR_MSG" - __usage - exit 1 - fi -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_config_dir -# DESCRIPTION: Checks the config directory, retrieves URLs if provided. -#---------------------------------------------------------------------------------------------------------------------- -__check_config_dir() { - CC_DIR_NAME="$1" - CC_DIR_BASE=$(basename "${CC_DIR_NAME}") - - case "$CC_DIR_NAME" in - http://*|https://*) - __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" - ;; - ftp://*) - __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" - ;; - *://*) - echoerror "Unsupported URI scheme for $CC_DIR_NAME" - echo "null" - return - ;; - *) - if [ ! -e "${CC_DIR_NAME}" ]; then - echoerror "The configuration directory or archive $CC_DIR_NAME does not exist." 
- echo "null" - return - fi - ;; - esac - - case "$CC_DIR_NAME" in - *.tgz|*.tar.gz) - tar -zxf "${CC_DIR_NAME}" -C /tmp - CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tgz") - CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.gz") - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" - ;; - *.tbz|*.tar.bz2) - tar -xjf "${CC_DIR_NAME}" -C /tmp - CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tbz") - CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.bz2") - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" - ;; - *.txz|*.tar.xz) - tar -xJf "${CC_DIR_NAME}" -C /tmp - CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".txz") - CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.xz") - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" - ;; - esac - - echo "${CC_DIR_NAME}" -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_unparsed_options -# DESCRIPTION: Checks the placed after the install arguments -#---------------------------------------------------------------------------------------------------------------------- -__check_unparsed_options() { - shellopts="$1" - # grep alternative for SunOS - if [ -f /usr/xpg4/bin/grep ]; then - grep='/usr/xpg4/bin/grep' - else - grep='grep' - fi - unparsed_options=$( echo "$shellopts" | ${grep} -E '(^|[[:space:]])[-]+[[:alnum:]]' ) - if [ "$unparsed_options" != "" ]; then - __usage - echo - echoerror "options are only allowed before install arguments" - echo - exit 1 - fi -} - - -#---------------------------------------------------------------------------------------------------------------------- -# Handle command line arguments -#---------------------------------------------------------------------------------------------------------------------- -_KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE} -_TEMP_CONFIG_DIR="null" -_SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git" -_SALT_REPO_URL=${_SALTSTACK_REPO_URL} -_DOWNSTREAM_PKG_REPO=$BS_FALSE -_TEMP_KEYS_DIR="null" -_SLEEP="${__DEFAULT_SLEEP}" 
-_INSTALL_MASTER=$BS_FALSE -_INSTALL_SYNDIC=$BS_FALSE -_INSTALL_MINION=$BS_TRUE -_INSTALL_CLOUD=$BS_FALSE -_VIRTUALENV_DIR=${BS_VIRTUALENV_DIR:-"null"} -_START_DAEMONS=$BS_TRUE -_DISABLE_SALT_CHECKS=$BS_FALSE -_ECHO_DEBUG=${BS_ECHO_DEBUG:-$BS_FALSE} -_CONFIG_ONLY=$BS_FALSE -_PIP_ALLOWED=${BS_PIP_ALLOWED:-$BS_FALSE} -_PIP_ALL=${BS_PIP_ALL:-$BS_FALSE} -_SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/etc/salt} -_SALT_CACHE_DIR=${BS_SALT_CACHE_DIR:-/var/cache/salt} -_PKI_DIR=${_SALT_ETC_DIR}/pki -_FORCE_OVERWRITE=${BS_FORCE_OVERWRITE:-$BS_FALSE} -_GENTOO_USE_BINHOST=${BS_GENTOO_USE_BINHOST:-$BS_FALSE} -_EPEL_REPO=${BS_EPEL_REPO:-epel} -_EPEL_REPOS_INSTALLED=$BS_FALSE -_UPGRADE_SYS=${BS_UPGRADE_SYS:-$BS_FALSE} -_INSECURE_DL=${BS_INSECURE_DL:-$BS_FALSE} -_CURL_ARGS=${BS_CURL_ARGS:-} -_FETCH_ARGS=${BS_FETCH_ARGS:-} -_GPG_ARGS=${BS_GPG_ARGS:-} -_WGET_ARGS=${BS_WGET_ARGS:-} -_SALT_MASTER_ADDRESS=${BS_SALT_MASTER_ADDRESS:-null} -_SALT_MINION_ID="null" -# _SIMPLIFY_VERSION is mostly used in Solaris based distributions -_SIMPLIFY_VERSION=$BS_TRUE -_LIBCLOUD_MIN_VERSION="0.14.0" -_EXTRA_PACKAGES="" -_HTTP_PROXY="" -_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt} -_NO_DEPS=$BS_FALSE -_FORCE_SHALLOW_CLONE=$BS_FALSE -_DISABLE_SSL=$BS_FALSE -_DISABLE_REPOS=$BS_FALSE -_CUSTOM_REPO_URL="null" -_CUSTOM_MASTER_CONFIG="null" -_CUSTOM_MINION_CONFIG="null" -_QUIET_GIT_INSTALLATION=$BS_FALSE -_REPO_URL="repo.saltproject.io" -_PY_EXE="python3" -_INSTALL_PY="$BS_FALSE" -_TORNADO_MAX_PY3_VERSION="5.0" -_POST_NEON_INSTALL=$BS_FALSE -_MINIMUM_PIP_VERSION="9.0.1" -_MINIMUM_SETUPTOOLS_VERSION="9.1" -_POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr" - -# Defaults for install arguments -ITYPE="stable" - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __usage -# DESCRIPTION: Display usage information. 
-#---------------------------------------------------------------------------------------------------------------------- -__usage() { - cat << EOT - - Usage : ${__ScriptName} [options] [install-type-args] - - Installation types: - - stable Install latest stable release. This is the default - install type - - stable [branch] Install latest version on a branch. Only supported - for packages available at repo.saltproject.io - - stable [version] Install a specific version. Only supported for - packages available at repo.saltproject.io - To pin a 3xxx minor version, specify it as 3xxx.0 - - testing RHEL-family specific: configure EPEL testing repo - - git Install from the head of the master branch - - git [ref] Install from any git ref (such as a branch, tag, or - commit) - - Examples: - - ${__ScriptName} - - ${__ScriptName} stable - - ${__ScriptName} stable 2017.7 - - ${__ScriptName} stable 2017.7.2 - - ${__ScriptName} testing - - ${__ScriptName} git - - ${__ScriptName} git 2017.7 - - ${__ScriptName} git v2017.7.2 - - ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358 - - Options: - -h Display this message - -v Display script version - -n No colours - -D Show debug output - -c Temporary configuration directory - -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} - -w Install packages from downstream package repository rather than - upstream, saltstack package repository. This is currently only - implemented for SUSE. - -k Temporary directory holding the minion keys which will pre-seed - the master. - -s Sleep time used when waiting for daemons to start, restart and when - checking for the services running. Default: ${__DEFAULT_SLEEP} - -L Also install salt-cloud and required python-libcloud package - -M Also install salt-master - -S Also install salt-syndic - -N Do not install salt-minion - -X Do not start daemons after installation - -d Disables checking if Salt services are enabled to start on system boot. 
- You can also do this by touching /tmp/disable_salt_checks on the target - host. Default: \${BS_FALSE} - -P Allow pip based installations. On some distributions the required salt - packages or its dependencies are not available as a package for that - distribution. Using this flag allows the script to use pip as a last - resort method. NOTE: This only works for functions which actually - implement pip based installations. - -U If set, fully upgrade the system prior to bootstrapping Salt - -I If set, allow insecure connections while downloading any files. For - example, pass '--no-check-certificate' to 'wget' or '--insecure' to - 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining - GnuPG archive keys insecurely if distro has changed release signatures. - -F Allow copied files to overwrite existing (config, init.d, etc) - -K If set, keep the temporary files in the temporary directories specified - with -c and -k - -C Only run the configuration function. Implies -F (forced overwrite). - To overwrite Master or Syndic configs, -M or -S, respectively, must - also be specified. Salt installation will be ommitted, but some of the - dependencies could be installed to write configuration with -j or -J. - -A Pass the salt-master DNS name or IP. This will be stored under - \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf - -i Pass the salt-minion id. This will be stored under - \${BS_SALT_ETC_DIR}/minion_id - -p Extra-package to install while installing Salt dependencies. One package - per -p flag. You are responsible for providing the proper package name. - -H Use the specified HTTP proxy for all download URLs (including https://). - For example: http://myproxy.example.com:3128 - -b Assume that dependencies are already installed and software sources are - set up. If git is selected, git tree is still checked out as dependency - step. - -f Force shallow cloning for git installations. - This may result in an "n/a" in the version number. 
- -l Disable ssl checks. When passed, switches "https" calls to "http" where - possible. - -V Install Salt into virtualenv - (only available for Ubuntu based distributions) - -a Pip install all Python pkg dependencies for Salt. Requires -V to install - all pip pkgs into the virtualenv. - (Only available for Ubuntu based distributions) - -r Disable all repository configuration performed by this script. This - option assumes all necessary repository configuration is already present - on the system. - -R Specify a custom repository URL. Assumes the custom repository URL - points to a repository that mirrors Salt packages located at - repo.saltproject.io. The option passed with -R replaces the - "repo.saltproject.io". If -R is passed, -r is also set. Currently only - works on CentOS/RHEL and Debian based distributions. - -J Replace the Master config file with data passed in as a JSON string. If - a Master config file is found, a reasonable effort will be made to save - the file with a ".bak" extension. If used in conjunction with -C or -F, - no ".bak" file will be created as either of those options will force - a complete overwrite of the file. - -j Replace the Minion config file with data passed in as a JSON string. If - a Minion config file is found, a reasonable effort will be made to save - the file with a ".bak" extension. If used in conjunction with -C or -F, - no ".bak" file will be created as either of those options will force - a complete overwrite of the file. - -q Quiet salt installation from git (setup.py install -q) - -x Changes the Python version used to install Salt. - For CentOS 6 git installations python2.7 is supported. - Fedora git installation, CentOS 7, Debian 9, Ubuntu 16.04 and 18.04 support python3. - -y Installs a different python version on host. Currently this has only been - tested with CentOS 6 and is considered experimental. This will install the - ius repo on the box if disable repo is false. This must be used in conjunction - with -x . 
For example: - sh bootstrap.sh -P -y -x python2.7 git v2017.7.2 - The above will install python27 and install the git version of salt using the - python2.7 executable. This only works for git and pip installations. - -EOT -} # ---------- end of function __usage ---------- - - -while getopts ':hvnDc:g:Gyx:wk:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aq' opt -do - case "${opt}" in - - h ) __usage; exit 0 ;; - v ) echo "$0 -- Version $__ScriptVersion"; exit 0 ;; - n ) _COLORS=0; __detect_color_support ;; - D ) _ECHO_DEBUG=$BS_TRUE ;; - c ) _TEMP_CONFIG_DIR="$OPTARG" ;; - g ) _SALT_REPO_URL=$OPTARG ;; - - G ) echowarn "The '-G' option is DEPRECATED and will be removed in the future stable release!" - echowarn "Bootstrap will always use 'https' protocol to clone from SaltStack GitHub repo." - echowarn "No need to provide this option anymore, now it is a default behavior." - ;; - - w ) _DOWNSTREAM_PKG_REPO=$BS_TRUE ;; - k ) _TEMP_KEYS_DIR="$OPTARG" ;; - s ) _SLEEP=$OPTARG ;; - M ) _INSTALL_MASTER=$BS_TRUE ;; - S ) _INSTALL_SYNDIC=$BS_TRUE ;; - N ) _INSTALL_MINION=$BS_FALSE ;; - X ) _START_DAEMONS=$BS_FALSE ;; - C ) _CONFIG_ONLY=$BS_TRUE ;; - P ) _PIP_ALLOWED=$BS_TRUE ;; - F ) _FORCE_OVERWRITE=$BS_TRUE ;; - U ) _UPGRADE_SYS=$BS_TRUE ;; - K ) _KEEP_TEMP_FILES=$BS_TRUE ;; - I ) _INSECURE_DL=$BS_TRUE ;; - A ) _SALT_MASTER_ADDRESS=$OPTARG ;; - i ) _SALT_MINION_ID=$OPTARG ;; - L ) _INSTALL_CLOUD=$BS_TRUE ;; - p ) _EXTRA_PACKAGES="$_EXTRA_PACKAGES $OPTARG" ;; - d ) _DISABLE_SALT_CHECKS=$BS_TRUE ;; - H ) _HTTP_PROXY="$OPTARG" ;; - b ) _NO_DEPS=$BS_TRUE ;; - f ) _FORCE_SHALLOW_CLONE=$BS_TRUE ;; - l ) _DISABLE_SSL=$BS_TRUE ;; - V ) _VIRTUALENV_DIR="$OPTARG" ;; - a ) _PIP_ALL=$BS_TRUE ;; - r ) _DISABLE_REPOS=$BS_TRUE ;; - R ) _CUSTOM_REPO_URL=$OPTARG ;; - J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;; - j ) _CUSTOM_MINION_CONFIG=$OPTARG ;; - q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;; - x ) _PY_EXE="$OPTARG" ;; - y ) _INSTALL_PY="$BS_TRUE" ;; - - \?) 
echo - echoerror "Option does not exist : $OPTARG" - __usage - exit 1 - ;; - - esac # --- end of case --- -done -shift $((OPTIND-1)) - - -# Define our logging file and pipe paths -LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )" -LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )" -# Ensure no residual pipe exists -rm "$LOGPIPE" 2>/dev/null - -# Create our logging pipe -# On FreeBSD we have to use mkfifo instead of mknod -if ! (mknod "$LOGPIPE" p >/dev/null 2>&1 || mkfifo "$LOGPIPE" >/dev/null 2>&1); then - echoerror "Failed to create the named pipe required to log" - exit 1 -fi - -# What ever is written to the logpipe gets written to the logfile -tee < "$LOGPIPE" "$LOGFILE" & - -# Close STDOUT, reopen it directing it to the logpipe -exec 1>&- -exec 1>"$LOGPIPE" -# Close STDERR, reopen it directing it to the logpipe -exec 2>&- -exec 2>"$LOGPIPE" - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __exit_cleanup -# DESCRIPTION: Cleanup any leftovers after script has ended -# -# -# http://www.unix.com/man-page/POSIX/1posix/trap/ -# -# Signal Number Signal Name -# 1 SIGHUP -# 2 SIGINT -# 3 SIGQUIT -# 6 SIGABRT -# 9 SIGKILL -# 14 SIGALRM -# 15 SIGTERM -#---------------------------------------------------------------------------------------------------------------------- -APT_ERR=$(mktemp /tmp/apt_error.XXXXXX) -__exit_cleanup() { - EXIT_CODE=$? 
- - if [ "$ITYPE" = "git" ] && [ -d "${_SALT_GIT_CHECKOUT_DIR}" ]; then - if [ $_KEEP_TEMP_FILES -eq $BS_FALSE ]; then - # Clean up the checked out repository - echodebug "Cleaning up the Salt Temporary Git Repository" - # shellcheck disable=SC2164 - cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" - rm -rf "${_SALT_GIT_CHECKOUT_DIR}" - #rm -rf "${_SALT_GIT_CHECKOUT_DIR}/deps" - else - echowarn "Not cleaning up the Salt Temporary git repository on request" - echowarn "Note that if you intend to re-run this script using the git approach, you might encounter some issues" - fi - fi - - # Remove the logging pipe when the script exits - if [ -p "$LOGPIPE" ]; then - echodebug "Removing the logging pipe $LOGPIPE" - rm -f "$LOGPIPE" - fi - - # Remove the temporary apt error file when the script exits - if [ -f "$APT_ERR" ]; then - echodebug "Removing the temporary apt error file $APT_ERR" - rm -f "$APT_ERR" - fi - - # Kill tee when exiting, CentOS, at least requires this - # shellcheck disable=SC2009 - TEE_PID=$(ps ax | grep tee | grep "$LOGFILE" | awk '{print $1}') - - [ "$TEE_PID" = "" ] && exit $EXIT_CODE - - echodebug "Killing logging pipe tee's with pid(s): $TEE_PID" - - # We need to trap errors since killing tee will cause a 127 errno - # We also do this as late as possible so we don't "mis-catch" other errors - __trap_errors() { - echoinfo "Errors Trapped: $EXIT_CODE" - # Exit with the "original" exit code, not the trapped code - exit $EXIT_CODE - } - trap "__trap_errors" INT ABRT QUIT TERM - - # Now we're "good" to kill tee - kill -s TERM "$TEE_PID" - - # In case the 127 errno is not triggered, exit with the "original" exit code - exit $EXIT_CODE -} -trap "__exit_cleanup" EXIT INT - - -# Let's discover how we're being called -# shellcheck disable=SC2009 -CALLER=$(ps -a -o pid,args | grep $$ | grep -v grep | tr -s ' ' | cut -d ' ' -f 3) - -if [ "${CALLER}x" = "${0}x" ]; then - CALLER="shell pipe" -fi - -echoinfo "Running version: ${__ScriptVersion}" -echoinfo "Executed by: 
${CALLER}" -echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" -#echowarn "Running the unstable version of ${__ScriptName}" - -# Define installation type -if [ "$#" -gt 0 ];then - __check_unparsed_options "$*" - ITYPE=$1 - shift -fi - -# Check installation type -if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git)')" = "" ]; then - echoerror "Installation type \"$ITYPE\" is not known..." - exit 1 -fi - -# If doing a git install, check what branch/tag/sha will be checked out -if [ "$ITYPE" = "git" ]; then - if [ "$#" -eq 0 ];then - GIT_REV="master" - else - GIT_REV="$1" - shift - fi - - # Disable shell warning about unbound variable during git install - STABLE_REV="latest" - -# If doing stable install, check if version specified -elif [ "$ITYPE" = "stable" ]; then - if [ "$#" -eq 0 ];then - STABLE_REV="latest" - else - if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2|3000|3001|3002|3003|3004)$')" != "" ]; then - STABLE_REV="$1" - shift - elif [ "$(echo "$1" | grep -E '^(2[0-9]*\.[0-9]*\.[0-9]*|[3-9][0-9]{3}(\.[0-9]*)?)$')" != "" ]; then - # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix - STABLE_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') - if [ "$(uname)" != "Darwin" ]; then - STABLE_REV="archive/$STABLE_REV" - fi - shift - else - echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, 3000, 3001, 3002, 3003, 3004, latest, \$MAJOR.\$MINOR.\$PATCH until 2019.2, \$MAJOR or \$MAJOR.\$PATCH starting from 3000)" - exit 1 - fi - fi -fi - -# Check for any unparsed arguments. Should be an error. -if [ "$#" -gt 0 ]; then - __usage - echo - echoerror "Too many arguments." 
- exit 1 -fi - -# whoami alternative for SunOS -if [ -f /usr/xpg4/bin/id ]; then - whoami='/usr/xpg4/bin/id -un' -else - whoami='whoami' -fi - -# Root permissions are required to run this script -if [ "$($whoami)" != "root" ]; then - echoerror "Salt requires root privileges to install. Please re-run this script as root." - exit 1 -fi - -# Check that we're actually installing one of minion/master/syndic -if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echowarn "Nothing to install or configure" - exit 1 -fi - -# Check that we're installing a minion if we're being passed a master address -if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MASTER_ADDRESS" != "null" ]; then - echoerror "Don't pass a master address (-A) if no minion is going to be bootstrapped." - exit 1 -fi - -# Check that we're installing a minion if we're being passed a minion id -if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_SALT_MINION_ID" != "null" ]; then - echoerror "Don't pass a minion id (-i) if no minion is going to be bootstrapped." - exit 1 -fi - -# Check that we're installing or configuring a master if we're being passed a master config json dict -if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then - if [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoerror "Don't pass a master config JSON dict (-J) if no master is going to be bootstrapped or configured." - exit 1 - fi -fi - -# Check that we're installing or configuring a minion if we're being passed a minion config json dict -if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then - if [ "$_INSTALL_MINION" -eq $BS_FALSE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoerror "Don't pass a minion config JSON dict (-j) if no minion is going to be bootstrapped or configured." 
- exit 1 - fi -fi - -# Check if we're installing via a different Python executable and set major version variables -if [ -n "$_PY_EXE" ]; then - if [ "$(uname)" = "Darwin" ]; then - _PY_PKG_VER=$(echo "$_PY_EXE" | sed "s/\\.//g") - else - _PY_PKG_VER=$(echo "$_PY_EXE" | sed -E "s/\\.//g") - fi - - _PY_MAJOR_VERSION=$(echo "$_PY_PKG_VER" | cut -c 7) - if [ "$_PY_MAJOR_VERSION" != 3 ] && [ "$_PY_MAJOR_VERSION" != 2 ]; then - echoerror "Detected -x option, but Python major version is not 2 or 3." - echoerror "The -x option must be passed as python2, python27, or python2.7 (or use the Python '3' versions of examples)." - exit 1 - fi - - if [ "$_PY_EXE" != "python3" ]; then - echoinfo "Detected -x option. Using $_PY_EXE to install Salt." - fi -else - _PY_PKG_VER="" - _PY_MAJOR_VERSION="" -fi - -# If the configuration directory or archive does not exist, error out -if [ "$_TEMP_CONFIG_DIR" != "null" ]; then - _TEMP_CONFIG_DIR="$(__check_config_dir "$_TEMP_CONFIG_DIR")" - [ "$_TEMP_CONFIG_DIR" = "null" ] && exit 1 -fi - -# If the pre-seed keys directory does not exist, error out -if [ "$_TEMP_KEYS_DIR" != "null" ] && [ ! -d "$_TEMP_KEYS_DIR" ]; then - echoerror "The pre-seed keys directory ${_TEMP_KEYS_DIR} does not exist." - exit 1 -fi - -# -a and -V only work from git -if [ "$ITYPE" != "git" ]; then - if [ $_PIP_ALL -eq $BS_TRUE ]; then - echoerror "Pip installing all python packages with -a is only possible when installing Salt via git" - exit 1 - fi - if [ "$_VIRTUALENV_DIR" != "null" ]; then - echoerror "Virtualenv installs via -V is only possible when installing Salt via git" - exit 1 - fi -fi - -# Set the _REPO_URL value based on if -R was passed or not. Defaults to repo.saltproject.io. -if [ "$_CUSTOM_REPO_URL" != "null" ]; then - _REPO_URL="$_CUSTOM_REPO_URL" - - # Check for -r since -R is being passed. Set -r with a warning. - if [ "$_DISABLE_REPOS" -eq $BS_FALSE ]; then - echowarn "Detected -R option. No other repositories will be configured when -R is used. 
Setting -r option to True." - _DISABLE_REPOS=$BS_TRUE - fi -fi - -# Check the _DISABLE_SSL value and set HTTP or HTTPS. -if [ "$_DISABLE_SSL" -eq $BS_TRUE ]; then - HTTP_VAL="http" -else - HTTP_VAL="https" -fi - -# Check the _QUIET_GIT_INSTALLATION value and set SETUP_PY_INSTALL_ARGS. -if [ "$_QUIET_GIT_INSTALLATION" -eq $BS_TRUE ]; then - SETUP_PY_INSTALL_ARGS="-q" -else - SETUP_PY_INSTALL_ARGS="" -fi - -# Handle the insecure flags -if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then - _CURL_ARGS="${_CURL_ARGS} --insecure" - _FETCH_ARGS="${_FETCH_ARGS} --no-verify-peer" - _GPG_ARGS="${_GPG_ARGS} --keyserver-options no-check-cert" - _WGET_ARGS="${_WGET_ARGS} --no-check-certificate" -else - _GPG_ARGS="${_GPG_ARGS} --keyserver-options ca-cert-file=/etc/ssl/certs/ca-certificates.crt" -fi - -# Export the http_proxy configuration to our current environment -if [ "${_HTTP_PROXY}" != "" ]; then - export http_proxy="${_HTTP_PROXY}" - export https_proxy="${_HTTP_PROXY}" - # Using "deprecated" option here, but that appears the only way to make it work. 
- # See https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=818802 - # and https://bugs.launchpad.net/ubuntu/+source/gnupg2/+bug/1625848 - _GPG_ARGS="${_GPG_ARGS},http-proxy=${_HTTP_PROXY}" -fi - -# Work around for 'Docker + salt-bootstrap failure' https://github.com/saltstack/salt-bootstrap/issues/394 -if [ "${_DISABLE_SALT_CHECKS}" -eq $BS_FALSE ] && [ -f /tmp/disable_salt_checks ]; then - # shellcheck disable=SC2016 - echowarn 'Found file: /tmp/disable_salt_checks, setting _DISABLE_SALT_CHECKS=$BS_TRUE' - _DISABLE_SALT_CHECKS=$BS_TRUE -fi - -# Because -a can only be installed into virtualenv -if [ "${_PIP_ALL}" -eq $BS_TRUE ] && [ "${_VIRTUALENV_DIR}" = "null" ]; then - usage - # Could possibly set up a default virtualenv location when -a flag is passed - echoerror "Using -a requires -V because pip pkgs should be siloed from python system pkgs" - exit 1 -fi - -# Make sure virtualenv directory does not already exist -if [ -d "${_VIRTUALENV_DIR}" ]; then - echoerror "The directory ${_VIRTUALENV_DIR} for virtualenv already exists" - exit 1 -fi - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __fetch_url -# DESCRIPTION: Retrieves a URL and writes it to a given path -#---------------------------------------------------------------------------------------------------------------------- -__fetch_url() { - # shellcheck disable=SC2086 - curl $_CURL_ARGS -L -s -f -o "$1" "$2" >/dev/null 2>&1 || - wget $_WGET_ARGS -q -O "$1" "$2" >/dev/null 2>&1 || - fetch $_FETCH_ARGS -q -o "$1" "$2" >/dev/null 2>&1 || # FreeBSD - fetch -q -o "$1" "$2" >/dev/null 2>&1 || # Pre FreeBSD 10 - ftp -o "$1" "$2" >/dev/null 2>&1 || # OpenBSD - (echoerror "$2 failed to download to $1"; exit 1) -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __fetch_verify -# DESCRIPTION: Retrieves a URL, verifies its content and writes it to standard 
output -#---------------------------------------------------------------------------------------------------------------------- -__fetch_verify() { - fetch_verify_url="$1" - fetch_verify_sum="$2" - fetch_verify_size="$3" - - fetch_verify_tmpf=$(mktemp) && \ - __fetch_url "$fetch_verify_tmpf" "$fetch_verify_url" && \ - test "$(stat --format=%s "$fetch_verify_tmpf")" -eq "$fetch_verify_size" && \ - test "$(md5sum "$fetch_verify_tmpf" | awk '{ print $1 }')" = "$fetch_verify_sum" && \ - cat "$fetch_verify_tmpf" && \ - if rm -f "$fetch_verify_tmpf"; then - return 0 - fi - echo "Failed verification of $fetch_verify_url" - return 1 -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __gather_hardware_info -# DESCRIPTION: Discover hardware information -#---------------------------------------------------------------------------------------------------------------------- -__gather_hardware_info() { - if [ -f /proc/cpuinfo ]; then - CPU_VENDOR_ID=$(awk '/vendor_id|Processor/ {sub(/-.*$/,"",$3); print $3; exit}' /proc/cpuinfo ) - elif [ -f /usr/bin/kstat ]; then - # SmartOS. - # Solaris!? 
- # This has only been tested for a GenuineIntel CPU - CPU_VENDOR_ID=$(/usr/bin/kstat -p cpu_info:0:cpu_info0:vendor_id | awk '{print $2}') - else - CPU_VENDOR_ID=$( sysctl -n hw.model ) - fi - # shellcheck disable=SC2034 - CPU_VENDOR_ID_L=$( echo "$CPU_VENDOR_ID" | tr '[:upper:]' '[:lower:]' ) - CPU_ARCH=$(uname -m 2>/dev/null || uname -p 2>/dev/null || echo "unknown") - CPU_ARCH_L=$( echo "$CPU_ARCH" | tr '[:upper:]' '[:lower:]' ) -} -__gather_hardware_info - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __gather_os_info -# DESCRIPTION: Discover operating system information -#---------------------------------------------------------------------------------------------------------------------- -__gather_os_info() { - OS_NAME=$(uname -s 2>/dev/null) - OS_NAME_L=$( echo "$OS_NAME" | tr '[:upper:]' '[:lower:]' ) - OS_VERSION=$(uname -r) - # shellcheck disable=SC2034 - OS_VERSION_L=$( echo "$OS_VERSION" | tr '[:upper:]' '[:lower:]' ) -} -__gather_os_info - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __parse_version_string -# DESCRIPTION: Parse version strings ignoring the revision. 
-# MAJOR.MINOR.REVISION becomes MAJOR.MINOR -#---------------------------------------------------------------------------------------------------------------------- -__parse_version_string() { - VERSION_STRING="$1" - PARSED_VERSION=$( - echo "$VERSION_STRING" | - sed -e 's/^/#/' \ - -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\)\(\.[0-9][0-9]*\).*$/\1/' \ - -e 's/^#[^0-9]*\([0-9][0-9]*\.[0-9][0-9]*\).*$/\1/' \ - -e 's/^#[^0-9]*\([0-9][0-9]*\).*$/\1/' \ - -e 's/^#.*$//' - ) - echo "$PARSED_VERSION" -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __derive_debian_numeric_version -# DESCRIPTION: Derive the numeric version from a Debian version string. -#---------------------------------------------------------------------------------------------------------------------- -__derive_debian_numeric_version() { - NUMERIC_VERSION="" - INPUT_VERSION="$1" - if echo "$INPUT_VERSION" | grep -q '^[0-9]'; then - NUMERIC_VERSION="$INPUT_VERSION" - elif [ -z "$INPUT_VERSION" ] && [ -f "/etc/debian_version" ]; then - INPUT_VERSION="$(cat /etc/debian_version)" - fi - if [ -z "$NUMERIC_VERSION" ]; then - if [ "$INPUT_VERSION" = "wheezy/sid" ]; then - # I've found an EC2 wheezy image which did not tell its version - NUMERIC_VERSION=$(__parse_version_string "7.0") - elif [ "$INPUT_VERSION" = "jessie/sid" ]; then - NUMERIC_VERSION=$(__parse_version_string "8.0") - elif [ "$INPUT_VERSION" = "stretch/sid" ]; then - NUMERIC_VERSION=$(__parse_version_string "9.0") - elif [ "$INPUT_VERSION" = "buster/sid" ]; then - NUMERIC_VERSION=$(__parse_version_string "10.0") - elif [ "$INPUT_VERSION" = "bullseye/sid" ]; then - NUMERIC_VERSION=$(__parse_version_string "11.0") - else - echowarn "Unable to parse the Debian Version (codename: '$INPUT_VERSION')" - fi - fi - echo "$NUMERIC_VERSION" -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# 
NAME: __unquote_string -# DESCRIPTION: Strip single or double quotes from the provided string. -#---------------------------------------------------------------------------------------------------------------------- -__unquote_string() { - # shellcheck disable=SC1117 - echo "$*" | sed -e "s/^\([\"\']\)\(.*\)\1\$/\2/g" -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __camelcase_split -# DESCRIPTION: Convert 'CamelCased' strings to 'Camel Cased' -#---------------------------------------------------------------------------------------------------------------------- -__camelcase_split() { - echo "$*" | sed -e 's/\([^[:upper:][:punct:]]\)\([[:upper:]]\)/\1 \2/g' -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __strip_duplicates -# DESCRIPTION: Strip duplicate strings -#---------------------------------------------------------------------------------------------------------------------- -__strip_duplicates() { - echo "$*" | tr -s '[:space:]' '\n' | awk '!x[$0]++' -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __sort_release_files -# DESCRIPTION: Custom sort function. Alphabetical or numerical sort is not -# enough. 
-#---------------------------------------------------------------------------------------------------------------------- -__sort_release_files() { - KNOWN_RELEASE_FILES=$(echo "(arch|alpine|centos|debian|ubuntu|fedora|redhat|suse|\ - mandrake|mandriva|gentoo|slackware|turbolinux|unitedlinux|void|lsb|system|\ - oracle|os)(-|_)(release|version)" | sed -E 's:[[:space:]]::g') - primary_release_files="" - secondary_release_files="" - # Sort know VS un-known files first - for release_file in $(echo "${@}" | sed -E 's:[[:space:]]:\n:g' | sort -f | uniq); do - match=$(echo "$release_file" | grep -E -i "${KNOWN_RELEASE_FILES}") - if [ "${match}" != "" ]; then - primary_release_files="${primary_release_files} ${release_file}" - else - secondary_release_files="${secondary_release_files} ${release_file}" - fi - done - - # Now let's sort by know files importance, max important goes last in the max_prio list - max_prio="redhat-release centos-release oracle-release fedora-release" - for entry in $max_prio; do - if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then - primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\2 \\1 \\3:g") - fi - done - # Now, least important goes last in the min_prio list - min_prio="lsb-release" - for entry in $min_prio; do - if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then - primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\1 \\3 \\2:g") - fi - done - - # Echo the results collapsing multiple white-space into a single white-space - echo "${primary_release_files} ${secondary_release_files}" | sed -E 's:[[:space:]]+:\n:g' -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __gather_linux_system_info -# DESCRIPTION: Discover Linux system information 
-#---------------------------------------------------------------------------------------------------------------------- -__gather_linux_system_info() { - DISTRO_NAME="" - DISTRO_VERSION="" - - # Let's test if the lsb_release binary is available - rv=$(lsb_release >/dev/null 2>&1) - - # shellcheck disable=SC2181 - if [ $? -eq 0 ]; then - DISTRO_NAME=$(lsb_release -si) - if [ "${DISTRO_NAME}" = "Scientific" ]; then - DISTRO_NAME="Scientific Linux" - elif [ "$(echo "$DISTRO_NAME" | grep ^CloudLinux)" != "" ]; then - DISTRO_NAME="Cloud Linux" - elif [ "$(echo "$DISTRO_NAME" | grep ^RedHat)" != "" ]; then - # Let's convert 'CamelCased' to 'Camel Cased' - n=$(__camelcase_split "$DISTRO_NAME") - # Skip setting DISTRO_NAME this time, splitting CamelCase has failed. - # See https://github.com/saltstack/salt-bootstrap/issues/918 - [ "$n" = "$DISTRO_NAME" ] && DISTRO_NAME="" || DISTRO_NAME="$n" - elif [ "$( echo "${DISTRO_NAME}" | grep openSUSE )" != "" ]; then - # lsb_release -si returns "openSUSE Tumbleweed" on openSUSE tumbleweed - # lsb_release -si returns "openSUSE project" on openSUSE 12.3 - # lsb_release -si returns "openSUSE" on openSUSE 15.n - DISTRO_NAME="opensuse" - elif [ "${DISTRO_NAME}" = "SUSE LINUX" ]; then - if [ "$(lsb_release -sd | grep -i opensuse)" != "" ]; then - # openSUSE 12.2 reports SUSE LINUX on lsb_release -si - DISTRO_NAME="opensuse" - else - # lsb_release -si returns "SUSE LINUX" on SLES 11 SP3 - DISTRO_NAME="suse" - fi - elif [ "${DISTRO_NAME}" = "EnterpriseEnterpriseServer" ]; then - # This the Oracle Linux Enterprise ID before ORACLE LINUX 5 UPDATE 3 - DISTRO_NAME="Oracle Linux" - elif [ "${DISTRO_NAME}" = "OracleServer" ]; then - # This the Oracle Linux Server 6.5 - DISTRO_NAME="Oracle Linux" - elif [ "${DISTRO_NAME}" = "AmazonAMI" ] || [ "${DISTRO_NAME}" = "Amazon" ]; then - DISTRO_NAME="Amazon Linux AMI" - elif [ "${DISTRO_NAME}" = "ManjaroLinux" ]; then - DISTRO_NAME="Arch Linux" - elif [ "${DISTRO_NAME}" = "Arch" ]; then - 
DISTRO_NAME="Arch Linux" - return - fi - rv=$(lsb_release -sr) - [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv") - elif [ -f /etc/lsb-release ]; then - # We don't have the lsb_release binary, though, we do have the file it parses - DISTRO_NAME=$(grep DISTRIB_ID /etc/lsb-release | sed -e 's/.*=//') - rv=$(grep DISTRIB_RELEASE /etc/lsb-release | sed -e 's/.*=//') - [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv") - fi - - if [ "$DISTRO_NAME" != "" ] && [ "$DISTRO_VERSION" != "" ]; then - # We already have the distribution name and version - return - fi - # shellcheck disable=SC2035,SC2086 - for rsource in $(__sort_release_files "$( - cd /etc && /bin/ls *[_-]release *[_-]version 2>/dev/null | env -i sort | \ - sed -e '/^redhat-release$/d' -e '/^lsb-release$/d'; \ - echo redhat-release lsb-release - )"); do - - [ ! -f "/etc/${rsource}" ] && continue # Does not exist - - n=$(echo "${rsource}" | sed -e 's/[_-]release$//' -e 's/[_-]version$//') - shortname=$(echo "${n}" | tr '[:upper:]' '[:lower:]') - if [ "$shortname" = "debian" ]; then - rv=$(__derive_debian_numeric_version "$(cat /etc/${rsource})") - else - rv=$( (grep VERSION "/etc/${rsource}"; cat "/etc/${rsource}") | grep '[0-9]' | sed -e 'q' ) - fi - [ "${rv}" = "" ] && [ "$shortname" != "arch" ] && continue # There's no version information. 
Continue to next rsource - v=$(__parse_version_string "$rv") - case $shortname in - redhat ) - if [ "$(grep -E 'CentOS' /etc/${rsource})" != "" ]; then - n="CentOS" - elif [ "$(grep -E 'Scientific' /etc/${rsource})" != "" ]; then - n="Scientific Linux" - elif [ "$(grep -E 'Red Hat Enterprise Linux' /etc/${rsource})" != "" ]; then - n="ed at nterprise inux" - else - n="ed at inux" - fi - ;; - arch ) n="Arch Linux" ;; - alpine ) n="Alpine Linux" ;; - centos ) n="CentOS" ;; - debian ) n="Debian" ;; - ubuntu ) n="Ubuntu" ;; - fedora ) n="Fedora" ;; - suse|opensuse ) n="SUSE" ;; - mandrake*|mandriva ) n="Mandriva" ;; - gentoo ) n="Gentoo" ;; - slackware ) n="Slackware" ;; - turbolinux ) n="TurboLinux" ;; - unitedlinux ) n="UnitedLinux" ;; - void ) n="VoidLinux" ;; - oracle ) n="Oracle Linux" ;; - system ) - while read -r line; do - [ "${n}x" != "systemx" ] && break - case "$line" in - *Amazon*Linux*AMI*) - n="Amazon Linux AMI" - break - esac - done < "/etc/${rsource}" - ;; - os ) - nn="$(__unquote_string "$(grep '^ID=' /etc/os-release | sed -e 's/^ID=\(.*\)$/\1/g')")" - rv="$(__unquote_string "$(grep '^VERSION_ID=' /etc/os-release | sed -e 's/^VERSION_ID=\(.*\)$/\1/g')")" - [ "${rv}" != "" ] && v=$(__parse_version_string "$rv") || v="" - case $(echo "${nn}" | tr '[:upper:]' '[:lower:]') in - alpine ) - n="Alpine Linux" - v="${rv}" - ;; - amzn ) - # Amazon AMI's after 2014.09 match here - n="Amazon Linux AMI" - ;; - arch ) - n="Arch Linux" - v="" # Arch Linux does not provide a version. 
- ;; - cloudlinux ) - n="Cloud Linux" - ;; - debian ) - n="Debian" - v=$(__derive_debian_numeric_version "$v") - ;; - sles ) - n="SUSE" - v="${rv}" - ;; - opensuse-* ) - n="opensuse" - v="${rv}" - ;; - * ) - n=${nn} - ;; - esac - ;; - * ) n="${n}" ; - esac - DISTRO_NAME=$n - DISTRO_VERSION=$v - break - done -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __install_python() -# DESCRIPTION: Install a different version of python on a host. Currently this has only been tested on CentOS 6 and -# is considered experimental. -#---------------------------------------------------------------------------------------------------------------------- -__install_python() { - if [ "$_PY_EXE" = "" ]; then - echoerror "Must specify -x with -y to install a specific python version" - exit 1 - fi - - __PACKAGES="$_PY_PKG_VER" - - if [ ${_DISABLE_REPOS} -eq ${BS_FALSE} ]; then - echoinfo "Attempting to install a repo to help provide a separate python package" - echoinfo "$DISTRO_NAME_L" - case "$DISTRO_NAME_L" in - "red_hat"|"centos") - __PYTHON_REPO_URL="https://repo.ius.io/ius-release-el${DISTRO_MAJOR_VERSION}.rpm" - ;; - *) - echoerror "Installing a repo to provide a python package is only supported on Redhat/CentOS. - If a repo is already available, please try running script with -r." 
- exit 1 - ;; - esac - - echoinfo "Installing IUS repo" - __yum_install_noinput "${__PYTHON_REPO_URL}" || return 1 - fi - - echoinfo "Installing ${__PACKAGES}" - __yum_install_noinput "${__PACKAGES}" || return 1 -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __gather_sunos_system_info -# DESCRIPTION: Discover SunOS system info -#---------------------------------------------------------------------------------------------------------------------- -__gather_sunos_system_info() { - if [ -f /sbin/uname ]; then - DISTRO_VERSION=$(/sbin/uname -X | awk '/[kK][eE][rR][nN][eE][lL][iI][dD]/ { print $3 }') - fi - - DISTRO_NAME="" - if [ -f /etc/release ]; then - while read -r line; do - [ "${DISTRO_NAME}" != "" ] && break - case "$line" in - *OpenIndiana*oi_[0-9]*) - DISTRO_NAME="OpenIndiana" - DISTRO_VERSION=$(echo "$line" | sed -nE "s/OpenIndiana(.*)oi_([[:digit:]]+)(.*)/\\2/p") - break - ;; - *OpenSolaris*snv_[0-9]*) - DISTRO_NAME="OpenSolaris" - DISTRO_VERSION=$(echo "$line" | sed -nE "s/OpenSolaris(.*)snv_([[:digit:]]+)(.*)/\\2/p") - break - ;; - *Oracle*Solaris*[0-9]*) - DISTRO_NAME="Oracle Solaris" - DISTRO_VERSION=$(echo "$line" | sed -nE "s/(Oracle Solaris) ([[:digit:]]+)(.*)/\\2/p") - break - ;; - *Solaris*) - DISTRO_NAME="Solaris" - # Let's make sure we not actually on a Joyent's SmartOS VM since some releases - # don't have SmartOS in `/etc/release`, only `Solaris` - if uname -v | grep joyent >/dev/null 2>&1; then - DISTRO_NAME="SmartOS" - fi - break - ;; - *NexentaCore*) - DISTRO_NAME="Nexenta Core" - break - ;; - *SmartOS*) - DISTRO_NAME="SmartOS" - break - ;; - *OmniOS*) - DISTRO_NAME="OmniOS" - DISTRO_VERSION=$(echo "$line" | awk '{print $3}') - _SIMPLIFY_VERSION=$BS_FALSE - break - ;; - esac - done < /etc/release - fi - - if [ "${DISTRO_NAME}" = "" ]; then - DISTRO_NAME="Solaris" - DISTRO_VERSION=$( - echo "${OS_VERSION}" | - sed -e 's;^4\.;1.;' \ - -e 
's;^5\.\([0-6]\)[^0-9]*$;2.\1;' \ - -e 's;^5\.\([0-9][0-9]*\).*;\1;' - ) - fi - - if [ "${DISTRO_NAME}" = "SmartOS" ]; then - VIRTUAL_TYPE="smartmachine" - if [ "$(zonename)" = "global" ]; then - VIRTUAL_TYPE="global" - fi - fi -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __gather_bsd_system_info -# DESCRIPTION: Discover OpenBSD, NetBSD and FreeBSD systems information -#---------------------------------------------------------------------------------------------------------------------- -__gather_bsd_system_info() { - DISTRO_NAME=${OS_NAME} - DISTRO_VERSION=$(echo "${OS_VERSION}" | sed -e 's;[()];;' -e 's/-.*$//') -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __gather_osx_system_info -# DESCRIPTION: Discover MacOS X -#---------------------------------------------------------------------------------------------------------------------- -__gather_osx_system_info() { - DISTRO_NAME="MacOSX" - DISTRO_VERSION=$(sw_vers -productVersion) -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __gather_system_info -# DESCRIPTION: Discover which system and distribution we are running. 
-#---------------------------------------------------------------------------------------------------------------------- -__gather_system_info() { - case ${OS_NAME_L} in - linux ) - __gather_linux_system_info - ;; - sunos ) - __gather_sunos_system_info - ;; - openbsd|freebsd|netbsd ) - __gather_bsd_system_info - ;; - darwin ) - __gather_osx_system_info - ;; - * ) - echoerror "${OS_NAME} not supported."; - exit 1 - ;; - esac - -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __ubuntu_derivatives_translation -# DESCRIPTION: Map Ubuntu derivatives to their Ubuntu base versions. -# If distro has a known Ubuntu base version, use those install -# functions by pretending to be Ubuntu (i.e. change global vars) -#---------------------------------------------------------------------------------------------------------------------- -# shellcheck disable=SC2034 -__ubuntu_derivatives_translation() { - UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon)" - # Mappings - trisquel_6_ubuntu_base="12.04" - linuxmint_13_ubuntu_base="12.04" - linuxmint_17_ubuntu_base="14.04" - linuxmint_18_ubuntu_base="16.04" - linuxmint_19_ubuntu_base="18.04" - linuxmint_20_ubuntu_base="20.04" - linaro_12_ubuntu_base="12.04" - elementary_os_02_ubuntu_base="12.04" - neon_16_ubuntu_base="16.04" - neon_18_ubuntu_base="18.04" - neon_20_ubuntu_base="20.04" - - # Translate Ubuntu derivatives to their base Ubuntu version - match=$(echo "$DISTRO_NAME_L" | grep -E ${UBUNTU_DERIVATIVES}) - - if [ "${match}" != "" ]; then - case $match in - "elementary_os") - _major=$(echo "$DISTRO_VERSION" | sed 's/\.//g') - ;; - "linuxmint") - export LSB_ETC_LSB_RELEASE=/etc/upstream-release/lsb-release - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - ;; - *) - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - ;; - esac - - _ubuntu_version=$(eval echo "\$${match}_${_major}_ubuntu_base") - - if [ 
"$_ubuntu_version" != "" ]; then - echodebug "Detected Ubuntu $_ubuntu_version derivative" - DISTRO_NAME_L="ubuntu" - DISTRO_VERSION="$_ubuntu_version" - fi - fi -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_dpkg_architecture -# DESCRIPTION: Determine the primary architecture for packages to install on Debian and derivatives -# and issue all necessary error messages. -#---------------------------------------------------------------------------------------------------------------------- -__check_dpkg_architecture() { - if __check_command_exists dpkg; then - DPKG_ARCHITECTURE="$(dpkg --print-architecture)" - else - echoerror "dpkg: command not found." - return 1 - fi - - __REPO_ARCH="$DPKG_ARCHITECTURE" - __REPO_ARCH_DEB='deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg]' - __return_code=0 - - case $DPKG_ARCHITECTURE in - "i386") - error_msg="$_REPO_URL likely doesn't have all required 32-bit packages for $DISTRO_NAME $DISTRO_MAJOR_VERSION." - # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location - __REPO_ARCH="amd64" - ;; - "amd64") - error_msg="" - ;; - "arm64") - if [ "$_CUSTOM_REPO_URL" != "null" ]; then - warn_msg="Support for arm64 is experimental, make sure the custom repository used has the expected structure and contents." - else - # Saltstack official repository does not yet have arm64 metadata, - # use amd64 repositories on arm64, since all pkgs are arch-independent - __REPO_ARCH="amd64" - __REPO_ARCH_DEB="deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg arch=$__REPO_ARCH]" - warn_msg="Support for arm64 packages is experimental and might rely on architecture-independent packages from the amd64 repository." 
- fi - error_msg="" - ;; - "armhf") - if [ "$DISTRO_NAME_L" = "ubuntu" ] || [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then - error_msg="Support for armhf packages at $_REPO_URL is limited to Debian/Raspbian 8 platforms." - __return_code=1 - else - error_msg="" - fi - ;; - *) - error_msg="$_REPO_URL doesn't have packages for your system architecture: $DPKG_ARCHITECTURE." - __return_code=1 - ;; - esac - - if [ "${warn_msg:-}" != "" ]; then - # AArch64: Do not fail at this point, but warn the user about experimental support - # See https://github.com/saltstack/salt-bootstrap/issues/1240 - echowarn "${warn_msg}" - fi - if [ "${error_msg}" != "" ]; then - echoerror "${error_msg}" - if [ "$ITYPE" != "git" ]; then - echoerror "You can try git installation mode, i.e.: sh ${__ScriptName} git v2017.7.2." - echoerror "It may be necessary to use git installation mode with pip and disable the SaltStack apt repository." - echoerror "For example:" - echoerror " sh ${__ScriptName} -r -P git v2017.7.2" - fi - fi - - if [ "${__return_code}" -eq 0 ]; then - return 0 - else - return 1 - fi -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __ubuntu_codename_translation -# DESCRIPTION: Map Ubuntu major versions to their corresponding codenames -#---------------------------------------------------------------------------------------------------------------------- -# shellcheck disable=SC2034 -__ubuntu_codename_translation() { - case $DISTRO_MINOR_VERSION in - "04") - _april="yes" - ;; - "10") - _april="" - ;; - *) - _april="yes" - ;; - esac - - case $DISTRO_MAJOR_VERSION in - "12") - DISTRO_CODENAME="precise" - ;; - "14") - DISTRO_CODENAME="trusty" - ;; - "16") - DISTRO_CODENAME="xenial" - ;; - "18") - DISTRO_CODENAME="bionic" - ;; - "20") - DISTRO_CODENAME="focal" - ;; - "21") - DISTRO_CODENAME="hirsute" - ;; - *) - DISTRO_CODENAME="trusty" - ;; - esac -} - - -#--- FUNCTION 
------------------------------------------------------------------------------------------------------- -# NAME: __debian_derivatives_translation -# DESCRIPTION: Map Debian derivatives to their Debian base versions. -# If distro has a known Debian base version, use those install -# functions by pretending to be Debian (i.e. change global vars) -#---------------------------------------------------------------------------------------------------------------------- -# shellcheck disable=SC2034 -__debian_derivatives_translation() { - # If the file does not exist, return - [ ! -f /etc/os-release ] && return - - DEBIAN_DERIVATIVES="(cumulus|devuan|kali|linuxmint|raspbian|bunsenlabs|turnkey)" - # Mappings - cumulus_2_debian_base="7.0" - cumulus_3_debian_base="8.0" - cumulus_4_debian_base="10.0" - devuan_1_debian_base="8.0" - devuan_2_debian_base="9.0" - kali_1_debian_base="7.0" - linuxmint_1_debian_base="8.0" - raspbian_8_debian_base="8.0" - raspbian_9_debian_base="9.0" - raspbian_10_debian_base="10.0" - bunsenlabs_9_debian_base="9.0" - turnkey_9_debian_base="9.0" - - # Translate Debian derivatives to their base Debian version - match=$(echo "$DISTRO_NAME_L" | grep -E ${DEBIAN_DERIVATIVES}) - - if [ "${match}" != "" ]; then - case $match in - cumulus*) - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - _debian_derivative="cumulus" - ;; - devuan) - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - _debian_derivative="devuan" - ;; - kali) - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - _debian_derivative="kali" - ;; - linuxmint) - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - _debian_derivative="linuxmint" - ;; - raspbian) - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - _debian_derivative="raspbian" - ;; - bunsenlabs) - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - _debian_derivative="bunsenlabs" - ;; - turnkey) - _major=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') 
- _debian_derivative="turnkey" - ;; - esac - - _debian_version=$(eval echo "\$${_debian_derivative}_${_major}_debian_base" 2>/dev/null) - - if [ "$_debian_version" != "" ]; then - echodebug "Detected Debian $_debian_version derivative" - DISTRO_NAME_L="debian" - DISTRO_VERSION="$_debian_version" - DISTRO_MAJOR_VERSION="$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g')" - fi - fi -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __debian_codename_translation -# DESCRIPTION: Map Debian major versions to their corresponding code names -#---------------------------------------------------------------------------------------------------------------------- -# shellcheck disable=SC2034 -__debian_codename_translation() { - - case $DISTRO_MAJOR_VERSION in - "9") - DISTRO_CODENAME="stretch" - ;; - "10") - DISTRO_CODENAME="buster" - ;; - "11") - DISTRO_CODENAME="bullseye" - ;; - *) - DISTRO_CODENAME="stretch" - ;; - esac -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_end_of_life_versions -# DESCRIPTION: Check for end of life distribution versions -#---------------------------------------------------------------------------------------------------------------------- -__check_end_of_life_versions() { - case "${DISTRO_NAME_L}" in - debian) - # Debian versions below 9 are not supported - if [ "$DISTRO_MAJOR_VERSION" -lt 9 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. 
See:" - echoerror " https://wiki.debian.org/DebianReleases" - exit 1 - fi - ;; - - ubuntu) - # Ubuntu versions not supported - # - # < 16.04 - # = 16.10 - # = 17.04, 17.10 - # = 18.10 - # = 19.04, 19.10 - if [ "$DISTRO_MAJOR_VERSION" -lt 16 ] || \ - [ "$DISTRO_MAJOR_VERSION" -eq 17 ] || \ - [ "$DISTRO_MAJOR_VERSION" -eq 19 ] || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://wiki.ubuntu.com/Releases" - exit 1 - fi - ;; - - opensuse) - # openSUSE versions not supported - # - # <= 13.X - # <= 42.2 - if [ "$DISTRO_MAJOR_VERSION" -lt 15 ] || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 42 ] && [ "$DISTRO_MINOR_VERSION" -le 2 ]; }; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " http://en.opensuse.org/Lifetime" - exit 1 - fi - ;; - - suse) - # SuSE versions not supported - # - # < 11 SP4 - # < 12 SP2 - # < 15 SP1 - SUSE_PATCHLEVEL=$(awk -F'=' '/VERSION_ID/ { print $2 }' /etc/os-release | grep -oP "\.\K\w+") - if [ "${SUSE_PATCHLEVEL}" = "" ]; then - SUSE_PATCHLEVEL="00" - fi - if [ "$DISTRO_MAJOR_VERSION" -lt 11 ] || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 11 ] && [ "$SUSE_PATCHLEVEL" -lt 04 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 15 ] && [ "$SUSE_PATCHLEVEL" -lt 01 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$SUSE_PATCHLEVEL" -lt 02 ]; }; then - echoerror "Versions lower than SuSE 11 SP4, 12 SP2 or 15 SP1 are not supported." 
- echoerror "Please consider upgrading to the next stable" - echoerror " https://www.suse.com/lifecycle/" - exit 1 - fi - ;; - - fedora) - # Fedora lower than 33 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 33 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://fedoraproject.org/wiki/Releases" - exit 1 - fi - ;; - - centos) - # CentOS versions lower than 7 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " http://wiki.centos.org/Download" - exit 1 - fi - ;; - - red_hat*linux) - # Red Hat (Enterprise) Linux versions lower than 7 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://access.redhat.com/support/policy/updates/errata/" - exit 1 - fi - ;; - - oracle*linux) - # Oracle Linux versions lower than 7 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " http://www.oracle.com/us/support/library/elsp-lifetime-069338.pdf" - exit 1 - fi - ;; - - scientific*linux) - # Scientific Linux versions lower than 7 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://www.scientificlinux.org/downloads/sl-versions/" - exit 1 - fi - ;; - - cloud*linux) - # Cloud Linux versions lower than 7 are no longer supported - if [ "$DISTRO_MAJOR_VERSION" -lt 7 ]; then - echoerror "End of life distributions are not supported." 
- echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://docs.cloudlinux.com/index.html?cloudlinux_life-cycle.html" - exit 1 - fi - ;; - - amazon*linux*ami) - # Amazon Linux versions 2018.XX and lower no longer supported - # Except for Amazon Linux 2, which reset the major version counter - if [ "$DISTRO_MAJOR_VERSION" -le 2018 ] && [ "$DISTRO_MAJOR_VERSION" -gt 10 ]; then - echoerror "End of life distributions are not supported." - echoerror "Please consider upgrading to the next stable. See:" - echoerror " https://aws.amazon.com/amazon-linux-ami/" - exit 1 - fi - ;; - - freebsd) - # FreeBSD versions lower than 11 are EOL - if [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then - echoerror "Versions lower than FreeBSD 11 are EOL and no longer supported." - exit 1 - fi - ;; - - *) - ;; - esac -} - - -__gather_system_info - -echo -echoinfo "System Information:" -echoinfo " CPU: ${CPU_VENDOR_ID}" -echoinfo " CPU Arch: ${CPU_ARCH}" -echoinfo " OS Name: ${OS_NAME}" -echoinfo " OS Version: ${OS_VERSION}" -echoinfo " Distribution: ${DISTRO_NAME} ${DISTRO_VERSION}" -echo - -# Simplify distro name naming on functions -DISTRO_NAME_L=$(echo "$DISTRO_NAME" | tr '[:upper:]' '[:lower:]' | sed 's/[^a-zA-Z0-9_ ]//g' | sed -Ee 's/([[:space:]])+/_/g' | sed -Ee 's/tumbleweed//' ) - -# Simplify version naming on functions -if [ "$DISTRO_VERSION" = "" ] || [ ${_SIMPLIFY_VERSION} -eq $BS_FALSE ]; then - DISTRO_MAJOR_VERSION="" - DISTRO_MINOR_VERSION="" - PREFIXED_DISTRO_MAJOR_VERSION="" - PREFIXED_DISTRO_MINOR_VERSION="" -else - DISTRO_MAJOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).*/\1/g') - DISTRO_MINOR_VERSION=$(echo "$DISTRO_VERSION" | sed 's/^\([0-9]*\).\([0-9]*\).*/\2/g') - PREFIXED_DISTRO_MAJOR_VERSION="_${DISTRO_MAJOR_VERSION}" - if [ "${PREFIXED_DISTRO_MAJOR_VERSION}" = "_" ]; then - PREFIXED_DISTRO_MAJOR_VERSION="" - fi - PREFIXED_DISTRO_MINOR_VERSION="_${DISTRO_MINOR_VERSION}" - if [ "${PREFIXED_DISTRO_MINOR_VERSION}" = "_" ]; then - 
PREFIXED_DISTRO_MINOR_VERSION="" - fi -fi - -# For Ubuntu derivatives, pretend to be their Ubuntu base version -__ubuntu_derivatives_translation - -# For Debian derivates, pretend to be their Debian base version -__debian_derivatives_translation - -# Fail soon for end of life versions -__check_end_of_life_versions - -echodebug "Binaries will be searched using the following \$PATH: ${PATH}" - -# Let users know that we'll use a proxy -if [ "${_HTTP_PROXY}" != "" ]; then - echoinfo "Using http proxy $_HTTP_PROXY" -fi - -# Let users know what's going to be installed/configured -if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoinfo "Installing minion" - else - echoinfo "Configuring minion" - fi -fi - -if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoinfo "Installing master" - else - echoinfo "Configuring master" - fi -fi - -if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then - if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoinfo "Installing syndic" - else - echoinfo "Configuring syndic" - fi -fi - -if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoinfo "Installing salt-cloud and required python-libcloud package" -fi - -if [ $_START_DAEMONS -eq $BS_FALSE ]; then - echoinfo "Daemons will not be started" -fi - -if [ "${DISTRO_NAME_L}" = "ubuntu" ]; then - # For ubuntu versions, obtain the codename from the release version - __ubuntu_codename_translation -elif [ "${DISTRO_NAME_L}" = "debian" ]; then - # For debian versions, obtain the codename from the release version - __debian_codename_translation -fi - -if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|gentoo|red_hat|oracle|scientific|amazon|fedora|macosx)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then - echoerror "${DISTRO_NAME} does not have major version pegged packages support" - exit 1 -fi - -# Only RedHat based distros have testing support -if [ 
"${ITYPE}" = "testing" ]; then - if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle)')" = "" ]; then - echoerror "${DISTRO_NAME} does not have testing packages support" - exit 1 - fi - _EPEL_REPO="epel-testing" -fi - -# Only Ubuntu has support for installing to virtualenvs -if [ "${DISTRO_NAME_L}" != "ubuntu" ] && [ "$_VIRTUALENV_DIR" != "null" ]; then - echoerror "${DISTRO_NAME} does not have -V support" - exit 1 -fi - -# Only Ubuntu has support for pip installing all packages -if [ "${DISTRO_NAME_L}" != "ubuntu" ] && [ $_PIP_ALL -eq $BS_TRUE ]; then - echoerror "${DISTRO_NAME} does not have -a support" - exit 1 -fi - -if [ "$ITYPE" = "git" ]; then - - if [ "${GIT_REV}" = "master" ]; then - _POST_NEON_INSTALL=$BS_TRUE - __TAG_REGEX_MATCH="MATCH" - else - case ${OS_NAME_L} in - openbsd|freebsd|netbsd|darwin ) - __NEW_VS_TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?3[0-9]{3}(\.[0-9]{1,2})?).*$/MATCH/') - if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then - _POST_NEON_INSTALL=$BS_TRUE - __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" - if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then - # We do this to properly clone tags - GIT_REV="v${GIT_REV}" - fi - echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" - else - __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?[0-9]{1,4}\.[0-9]{1,2})(\.[0-9]{1,2})?.*$/MATCH/') - echodebug "Pre Neon Tag Regex Match On: ${GIT_REV}" - fi - ;; - * ) - __NEW_VS_TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?3[[:digit:]]\{3\}\(\.[[:digit:]]\{1,2\}\)\?\).*$/MATCH/') - if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then - _POST_NEON_INSTALL=$BS_TRUE - __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" - if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then - # We do this to properly clone tags - GIT_REV="v${GIT_REV}" - fi - echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" - else - __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 
's/^.*\(v\?[[:digit:]]\{1,4\}\.[[:digit:]]\{1,2\}\)\(\.[[:digit:]]\{1,2\}\)\?.*$/MATCH/') - echodebug "Pre Neon Tag Regex Match On: ${GIT_REV}" - fi - ;; - esac - fi - - if [ "$_POST_NEON_INSTALL" -eq $BS_TRUE ]; then - echo - echowarn "Post Neon git based installations will always install salt" - echowarn "and its dependencies using pip which will be upgraded to" - echowarn "at least v${_MINIMUM_PIP_VERSION}, and, in case the setuptools version is also" - echowarn "too old, it will be upgraded to at least v${_MINIMUM_SETUPTOOLS_VERSION}" - echo - echowarn "You have 10 seconds to cancel and stop the bootstrap process..." - echo - sleep 10 - _PIP_ALLOWED=$BS_TRUE - fi -fi - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __function_defined -# DESCRIPTION: Checks if a function is defined within this scripts scope -# PARAMETERS: function name -# RETURNS: 0 or 1 as in defined or not defined -#---------------------------------------------------------------------------------------------------------------------- -__function_defined() { - FUNC_NAME=$1 - if [ "$(command -v "$FUNC_NAME")" != "" ]; then - echoinfo "Found function $FUNC_NAME" - return 0 - fi - echodebug "$FUNC_NAME not found...." - return 1 -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __wait_for_apt -# DESCRIPTION: Check if any apt, apt-get, aptitude, or dpkg processes are running before -# calling these again. This is useful when these process calls are part of -# a boot process, such as on AWS AMIs. This func will wait until the boot -# process is finished so the script doesn't exit on a locked proc. 
-#---------------------------------------------------------------------------------------------------------------------- -__wait_for_apt(){ - # Timeout set at 15 minutes - WAIT_TIMEOUT=900 - - # Run our passed in apt command - "${@}" 2>"$APT_ERR" - APT_RETURN=$? - - # Make sure we're not waiting on a lock - while [ $APT_RETURN -ne 0 ] && grep -q '^E: Could not get lock' "$APT_ERR"; do - echoinfo "Aware of the lock. Patiently waiting $WAIT_TIMEOUT more seconds..." - sleep 1 - WAIT_TIMEOUT=$((WAIT_TIMEOUT - 1)) - - if [ "$WAIT_TIMEOUT" -eq 0 ]; then - echoerror "Apt, apt-get, aptitude, or dpkg process is taking too long." - echoerror "Bootstrap script cannot proceed. Aborting." - return 1 - else - "${@}" 2>"$APT_ERR" - APT_RETURN=$? - fi - done - - return $APT_RETURN -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __apt_get_install_noinput -# DESCRIPTION: (DRY) apt-get install with noinput options -# PARAMETERS: packages -#---------------------------------------------------------------------------------------------------------------------- -__apt_get_install_noinput() { - __wait_for_apt apt-get install -y -o DPkg::Options::=--force-confold "${@}"; return $? -} # ---------- end of function __apt_get_install_noinput ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __apt_get_upgrade_noinput -# DESCRIPTION: (DRY) apt-get upgrade with noinput options -#---------------------------------------------------------------------------------------------------------------------- -__apt_get_upgrade_noinput() { - __wait_for_apt apt-get upgrade -y -o DPkg::Options::=--force-confold; return $? 
-} # ---------- end of function __apt_get_upgrade_noinput ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __temp_gpg_pub -# DESCRIPTION: Create a temporary file for downloading a GPG public key. -#---------------------------------------------------------------------------------------------------------------------- -__temp_gpg_pub() { - if __check_command_exists mktemp; then - tempfile="$(mktemp /tmp/salt-gpg-XXXXXXXX.pub 2>/dev/null)" - - if [ -z "$tempfile" ]; then - echoerror "Failed to create temporary file in /tmp" - return 1 - fi - else - tempfile="/tmp/salt-gpg-$$.pub" - fi - - echo $tempfile -} # ----------- end of function __temp_gpg_pub ----------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __apt_key_fetch -# DESCRIPTION: Download and import GPG public key for "apt-secure" -# PARAMETERS: url -#---------------------------------------------------------------------------------------------------------------------- -__apt_key_fetch() { - url=$1 - - tempfile="$(__temp_gpg_pub)" - - __fetch_url "$tempfile" "$url" || return 1 - cp -f "$tempfile" /usr/share/keyrings/salt-archive-keyring.gpg && chmod 644 /usr/share/keyrings/salt-archive-keyring.gpg || return 1 - rm -f "$tempfile" - - return 0 -} # ---------- end of function __apt_key_fetch ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __rpm_import_gpg -# DESCRIPTION: Download and import GPG public key to rpm database -# PARAMETERS: url -#---------------------------------------------------------------------------------------------------------------------- -__rpm_import_gpg() { - url=$1 - - tempfile="$(__temp_gpg_pub)" - - __fetch_url "$tempfile" "$url" || return 1 - - # At least on CentOS 8, a missing newline at the end causes: - # error: 
/tmp/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key. - # shellcheck disable=SC1003,SC2086 - sed -i -e '$a\' $tempfile - - rpm --import "$tempfile" || return 1 - rm -f "$tempfile" - - return 0 -} # ---------- end of function __rpm_import_gpg ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __yum_install_noinput -# DESCRIPTION: (DRY) yum install with noinput options -#---------------------------------------------------------------------------------------------------------------------- -__yum_install_noinput() { - - ENABLE_EPEL_CMD="" - # Skip Amazon Linux for the first round, since EPEL is no longer required. - # See issue #724 - if [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DISTRO_NAME_L" != "amazon_linux_ami" ]; then - ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" - fi - - if [ "$DISTRO_NAME_L" = "oracle_linux" ]; then - # We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!! - for package in "${@}"; do - yum -y install "${package}" || yum -y install "${package}" ${ENABLE_EPEL_CMD} || return $? - done - else - yum -y install "${@}" ${ENABLE_EPEL_CMD} || return $? - fi -} # ---------- end of function __yum_install_noinput ---------- - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __dnf_install_noinput -# DESCRIPTION: (DRY) dnf install with noinput options -#---------------------------------------------------------------------------------------------------------------------- -__dnf_install_noinput() { - - dnf -y install "${@}" || return $? -} # ---------- end of function __dnf_install_noinput ---------- - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __git_clone_and_checkout -# DESCRIPTION: (DRY) Helper function to clone and checkout salt to a -# specific revision. 
-#---------------------------------------------------------------------------------------------------------------------- -__git_clone_and_checkout() { - - echodebug "Installed git version: $(git --version | awk '{ print $3 }')" - # Turn off SSL verification if -I flag was set for insecure downloads - if [ "$_INSECURE_DL" -eq $BS_TRUE ]; then - export GIT_SSL_NO_VERIFY=1 - fi - - __SALT_GIT_CHECKOUT_PARENT_DIR=$(dirname "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null) - __SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-/tmp/git}" - __SALT_CHECKOUT_REPONAME="$(basename "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)" - __SALT_CHECKOUT_REPONAME="${__SALT_CHECKOUT_REPONAME:-salt}" - [ -d "${__SALT_GIT_CHECKOUT_PARENT_DIR}" ] || mkdir "${__SALT_GIT_CHECKOUT_PARENT_DIR}" - # shellcheck disable=SC2164 - cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" - if [ -d "${_SALT_GIT_CHECKOUT_DIR}" ]; then - echodebug "Found a checked out Salt repository" - # shellcheck disable=SC2164 - cd "${_SALT_GIT_CHECKOUT_DIR}" - echodebug "Fetching git changes" - git fetch || return 1 - # Tags are needed because of salt's versioning, also fetch that - echodebug "Fetching git tags" - git fetch --tags || return 1 - - # If we have the SaltStack remote set as upstream, we also need to fetch the tags from there - if [ "$(git remote -v | grep $_SALTSTACK_REPO_URL)" != "" ]; then - echodebug "Fetching upstream(SaltStack's Salt repository) git tags" - git fetch --tags upstream - else - echoinfo "Adding SaltStack's Salt repository as a remote" - git remote add upstream "$_SALTSTACK_REPO_URL" - echodebug "Fetching upstream(SaltStack's Salt repository) git tags" - git fetch --tags upstream - fi - - echodebug "Hard reseting the cloned repository to ${GIT_REV}" - git reset --hard "$GIT_REV" || return 1 - - # Just calling `git reset --hard $GIT_REV` on a branch name that has - # already been checked out will not update that branch to the upstream - # HEAD; instead it will simply reset to itself. 
Check the ref to see - # if it is a branch name, check out the branch, and pull in the - # changes. - if git branch -a | grep -q "${GIT_REV}"; then - echodebug "Rebasing the cloned repository branch" - git pull --rebase || return 1 - fi - else - if [ "$_FORCE_SHALLOW_CLONE" -eq "${BS_TRUE}" ]; then - echoinfo "Forced shallow cloning of git repository." - __SHALLOW_CLONE=$BS_TRUE - elif [ "$__TAG_REGEX_MATCH" = "MATCH" ]; then - echoinfo "Git revision matches a Salt version tag, shallow cloning enabled." - __SHALLOW_CLONE=$BS_TRUE - else - echowarn "The git revision being installed does not match a Salt version tag. Shallow cloning disabled" - __SHALLOW_CLONE=$BS_FALSE - fi - - if [ "$__SHALLOW_CLONE" -eq $BS_TRUE ]; then - # Let's try shallow cloning to speed up. - # Test for "--single-branch" option introduced in git 1.7.10, the minimal version of git where the shallow - # cloning we need actually works - if [ "$(git clone 2>&1 | grep 'single-branch')" != "" ]; then - # The "--single-branch" option is supported, attempt shallow cloning - echoinfo "Attempting to shallow clone $GIT_REV from Salt's repository ${_SALT_REPO_URL}" - if git clone --depth 1 --branch "$GIT_REV" "$_SALT_REPO_URL" "$__SALT_CHECKOUT_REPONAME"; then - # shellcheck disable=SC2164 - cd "${_SALT_GIT_CHECKOUT_DIR}" - __SHALLOW_CLONE=$BS_TRUE - else - # Shallow clone above failed(missing upstream tags???), let's resume the old behaviour. - echowarn "Failed to shallow clone." - echoinfo "Resuming regular git clone and remote SaltStack repository addition procedure" - __SHALLOW_CLONE=$BS_FALSE - fi - else - echodebug "Shallow cloning not possible. Required git version not met." - __SHALLOW_CLONE=$BS_FALSE - fi - fi - - if [ "$__SHALLOW_CLONE" -eq $BS_FALSE ]; then - git clone "$_SALT_REPO_URL" "$__SALT_CHECKOUT_REPONAME" || return 1 - # shellcheck disable=SC2164 - cd "${_SALT_GIT_CHECKOUT_DIR}" - - if ! 
echo "$_SALT_REPO_URL" | grep -q -F -w "${_SALTSTACK_REPO_URL#*://}"; then - # We need to add the saltstack repository as a remote and fetch tags for proper versioning - echoinfo "Adding SaltStack's Salt repository as a remote" - git remote add upstream "$_SALTSTACK_REPO_URL" || return 1 - - echodebug "Fetching upstream (SaltStack's Salt repository) git tags" - git fetch --tags upstream || return 1 - - # Check if GIT_REV is a remote branch or just a commit hash - if git branch -r | grep -q -F -w "origin/$GIT_REV"; then - GIT_REV="origin/$GIT_REV" - fi - fi - - echodebug "Checking out $GIT_REV" - git checkout "$GIT_REV" || return 1 - fi - - fi - - echoinfo "Cloning Salt's git repository succeeded" - return 0 -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __copyfile -# DESCRIPTION: Simple function to copy files. Overrides if asked. -#---------------------------------------------------------------------------------------------------------------------- -__copyfile() { - overwrite=$_FORCE_OVERWRITE - if [ $# -eq 2 ]; then - sfile=$1 - dfile=$2 - elif [ $# -eq 3 ]; then - sfile=$1 - dfile=$2 - overwrite=$3 - else - echoerror "Wrong number of arguments for __copyfile()" - echoinfo "USAGE: __copyfile OR __copyfile " - exit 1 - fi - - # Does the source file exist? - if [ ! -f "$sfile" ]; then - echowarn "$sfile does not exist!" - return 1 - fi - - # If the destination is a directory, let's make it a full path so the logic - # below works as expected - if [ -d "$dfile" ]; then - echodebug "The passed destination ($dfile) is a directory" - dfile="${dfile}/$(basename "$sfile")" - echodebug "Full destination path is now: $dfile" - fi - - if [ ! 
-f "$dfile" ]; then - # The destination file does not exist, copy - echodebug "Copying $sfile to $dfile" - cp "$sfile" "$dfile" || return 1 - elif [ -f "$dfile" ] && [ "$overwrite" -eq $BS_TRUE ]; then - # The destination exist and we're overwriting - echodebug "Overwriting $dfile with $sfile" - cp -f "$sfile" "$dfile" || return 1 - elif [ -f "$dfile" ] && [ "$overwrite" -ne $BS_TRUE ]; then - echodebug "Not overwriting $dfile with $sfile" - fi - return 0 -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __movefile -# DESCRIPTION: Simple function to move files. Overrides if asked. -#---------------------------------------------------------------------------------------------------------------------- -__movefile() { - overwrite=$_FORCE_OVERWRITE - if [ $# -eq 2 ]; then - sfile=$1 - dfile=$2 - elif [ $# -eq 3 ]; then - sfile=$1 - dfile=$2 - overwrite=$3 - else - echoerror "Wrong number of arguments for __movefile()" - echoinfo "USAGE: __movefile OR __movefile " - exit 1 - fi - - if [ $_KEEP_TEMP_FILES -eq $BS_TRUE ]; then - # We're being told not to move files, instead copy them so we can keep - # them around - echodebug "Since BS_KEEP_TEMP_FILES=1 we're copying files instead of moving them" - __copyfile "$sfile" "$dfile" "$overwrite" - return $? - fi - - # Does the source file exist? - if [ ! -f "$sfile" ]; then - echowarn "$sfile does not exist!" - return 1 - fi - - # If the destination is a directory, let's make it a full path so the logic - # below works as expected - if [ -d "$dfile" ]; then - echodebug "The passed destination($dfile) is a directory" - dfile="${dfile}/$(basename "$sfile")" - echodebug "Full destination path is now: $dfile" - fi - - if [ ! 
-f "$dfile" ]; then - # The destination file does not exist, move - echodebug "Moving $sfile to $dfile" - mv "$sfile" "$dfile" || return 1 - elif [ -f "$dfile" ] && [ "$overwrite" -eq $BS_TRUE ]; then - # The destination exist and we're overwriting - echodebug "Overriding $dfile with $sfile" - mv -f "$sfile" "$dfile" || return 1 - elif [ -f "$dfile" ] && [ "$overwrite" -ne $BS_TRUE ]; then - echodebug "Not overriding $dfile with $sfile" - fi - - return 0 -} - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __linkfile -# DESCRIPTION: Simple function to create symlinks. Overrides if asked. Accepts globs. -#---------------------------------------------------------------------------------------------------------------------- -__linkfile() { - overwrite=$_FORCE_OVERWRITE - if [ $# -eq 2 ]; then - target=$1 - linkname=$2 - elif [ $# -eq 3 ]; then - target=$1 - linkname=$2 - overwrite=$3 - else - echoerror "Wrong number of arguments for __linkfile()" - echoinfo "USAGE: __linkfile OR __linkfile " - exit 1 - fi - - for sfile in $target; do - # Does the source file exist? - if [ ! -f "$sfile" ]; then - echowarn "$sfile does not exist!" - return 1 - fi - - # If the destination is a directory, let's make it a full path so the logic - # below works as expected - if [ -d "$linkname" ]; then - echodebug "The passed link name ($linkname) is a directory" - linkname="${linkname}/$(basename "$sfile")" - echodebug "Full destination path is now: $linkname" - fi - - if [ ! 
-e "$linkname" ]; then - # The destination file does not exist, create link - echodebug "Creating $linkname symlink pointing to $sfile" - ln -s "$sfile" "$linkname" || return 1 - elif [ -e "$linkname" ] && [ "$overwrite" -eq $BS_TRUE ]; then - # The destination exist and we're overwriting - echodebug "Overwriting $linkname symlink to point on $sfile" - ln -sf "$sfile" "$linkname" || return 1 - elif [ -e "$linkname" ] && [ "$overwrite" -ne $BS_TRUE ]; then - echodebug "Not overwriting $linkname symlink to point on $sfile" - fi - done - - return 0 -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __overwriteconfig() -# DESCRIPTION: Simple function to overwrite master or minion config files. -#---------------------------------------------------------------------------------------------------------------------- -__overwriteconfig() { - if [ $# -eq 2 ]; then - target=$1 - json=$2 - else - echoerror "Wrong number of arguments for __convert_json_to_yaml_str()" - echoinfo "USAGE: __convert_json_to_yaml_str " - exit 1 - fi - - # Make a tempfile to dump any python errors into. - if __check_command_exists mktemp; then - tempfile="$(mktemp /tmp/salt-config-XXXXXXXX 2>/dev/null)" - - if [ -z "$tempfile" ]; then - echoerror "Failed to create temporary file in /tmp" - return 1 - fi - else - tempfile="/tmp/salt-config-$$" - fi - - if [ -n "$_PY_EXE" ]; then - good_python="$_PY_EXE" - # If python does not have yaml installed we're on Arch and should use python2 - elif python -c "import yaml" 2> /dev/null; then - good_python=python - else - good_python=python2 - fi - - # Convert json string to a yaml string and write it to config file. Output is dumped into tempfile. 
- "$good_python" -c "import json; import yaml; jsn=json.loads('$json'); yml=yaml.safe_dump(jsn, line_break='\\n', default_flow_style=False); config_file=open('$target', 'w'); config_file.write(yml); config_file.close();" 2>$tempfile - - # No python errors output to the tempfile - if [ ! -s "$tempfile" ]; then - rm -f "$tempfile" - return 0 - fi - - # Errors are present in the tempfile - let's expose them to the user. - fullerror=$(cat "$tempfile") - echodebug "$fullerror" - echoerror "Python error encountered. This is likely due to passing in a malformed JSON string. Please use -D to see stacktrace." - - rm -f "$tempfile" - - return 1 - -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_services_systemd -# DESCRIPTION: Return 0 or 1 in case the service is enabled or not -# PARAMETERS: servicename -#---------------------------------------------------------------------------------------------------------------------- -__check_services_systemd() { - if [ $# -eq 0 ]; then - echoerror "You need to pass a service name to check!" 
- exit 1 - elif [ $# -ne 1 ]; then - echoerror "You need to pass a service name to check as the single argument to the function" - fi - - servicename=$1 - echodebug "Checking if service ${servicename} is enabled" - - if [ "$(systemctl is-enabled "${servicename}")" = "enabled" ]; then - echodebug "Service ${servicename} is enabled" - return 0 - else - echodebug "Service ${servicename} is NOT enabled" - return 1 - fi -} # ---------- end of function __check_services_systemd ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_services_upstart -# DESCRIPTION: Return 0 or 1 in case the service is enabled or not -# PARAMETERS: servicename -#---------------------------------------------------------------------------------------------------------------------- -__check_services_upstart() { - if [ $# -eq 0 ]; then - echoerror "You need to pass a service name to check!" - exit 1 - elif [ $# -ne 1 ]; then - echoerror "You need to pass a service name to check as the single argument to the function" - fi - - servicename=$1 - echodebug "Checking if service ${servicename} is enabled" - - # Check if service is enabled to start at boot - if initctl list | grep "${servicename}" > /dev/null 2>&1; then - echodebug "Service ${servicename} is enabled" - return 0 - else - echodebug "Service ${servicename} is NOT enabled" - return 1 - fi -} # ---------- end of function __check_services_upstart ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_services_sysvinit -# DESCRIPTION: Return 0 or 1 in case the service is enabled or not -# PARAMETERS: servicename -#---------------------------------------------------------------------------------------------------------------------- -__check_services_sysvinit() { - if [ $# -eq 0 ]; then - echoerror "You need to pass a service name to check!" 
- exit 1 - elif [ $# -ne 1 ]; then - echoerror "You need to pass a service name to check as the single argument to the function" - fi - - servicename=$1 - echodebug "Checking if service ${servicename} is enabled" - - if [ "$(LC_ALL=C /sbin/chkconfig --list | grep "\\<${servicename}\\>" | grep '[2-5]:on')" != "" ]; then - echodebug "Service ${servicename} is enabled" - return 0 - else - echodebug "Service ${servicename} is NOT enabled" - return 1 - fi -} # ---------- end of function __check_services_sysvinit ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_services_debian -# DESCRIPTION: Return 0 or 1 in case the service is enabled or not -# PARAMETERS: servicename -#---------------------------------------------------------------------------------------------------------------------- -__check_services_debian() { - if [ $# -eq 0 ]; then - echoerror "You need to pass a service name to check!" 
- exit 1 - elif [ $# -ne 1 ]; then - echoerror "You need to pass a service name to check as the single argument to the function" - fi - - servicename=$1 - echodebug "Checking if service ${servicename} is enabled" - - # Check if the service is going to be started at any runlevel, fixes bootstrap in container (Docker, LXC) - if ls /etc/rc?.d/S*"${servicename}" >/dev/null 2>&1; then - echodebug "Service ${servicename} is enabled" - return 0 - else - echodebug "Service ${servicename} is NOT enabled" - return 1 - fi -} # ---------- end of function __check_services_debian ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_services_openbsd -# DESCRIPTION: Return 0 or 1 in case the service is enabled or not -# PARAMETERS: servicename -#---------------------------------------------------------------------------------------------------------------------- -__check_services_openbsd() { - if [ $# -eq 0 ]; then - echoerror "You need to pass a service name to check!" 
- exit 1 - elif [ $# -ne 1 ]; then - echoerror "You need to pass a service name to check as the single argument to the function" - fi - - servicename=$1 - echodebug "Checking if service ${servicename} is enabled" - - # shellcheck disable=SC2086,SC2046,SC2144 - if rcctl get ${servicename} status; then - echodebug "Service ${servicename} is enabled" - return 0 - else - echodebug "Service ${servicename} is NOT enabled" - return 1 - fi -} # ---------- end of function __check_services_openbsd ---------- - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_services_openrc -# DESCRIPTION: Return 0 or 1 in case the service is enabled or not -# PARAMETERS: servicename -#---------------------------------------------------------------------------------------------------------------------- -__check_services_openrc() { - if [ $# -eq 0 ]; then - echoerror "You need to pass a service name to check!" - exit 1 - elif [ $# -ne 1 ]; then - echoerror "You need to pass a service name to check as the single argument to the function" - fi - - servicename=$1 - echodebug "Checking if service ${servicename} is enabled" - - # shellcheck disable=SC2086,SC2046,SC2144 - if rc-status $(rc-status -r) | tail -n +2 | grep -q "\\<$servicename\\>"; then - echodebug "Service ${servicename} is enabled" - return 0 - else - echodebug "Service ${servicename} is NOT enabled" - return 1 - fi -} # ---------- end of function __check_services_openrc ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __create_virtualenv -# DESCRIPTION: Return 0 or 1 depending on successful creation of virtualenv -#---------------------------------------------------------------------------------------------------------------------- -__create_virtualenv() { - if [ ! 
-d "$_VIRTUALENV_DIR" ]; then - echoinfo "Creating virtualenv ${_VIRTUALENV_DIR}" - if [ $_PIP_ALL -eq $BS_TRUE ]; then - virtualenv --no-site-packages "${_VIRTUALENV_DIR}" || return 1 - else - virtualenv --system-site-packages "${_VIRTUALENV_DIR}" || return 1 - fi - fi - return 0 -} # ---------- end of function __create_virtualenv ---------- - - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __activate_virtualenv -# DESCRIPTION: Return 0 or 1 depending on successful activation of virtualenv -#---------------------------------------------------------------------------------------------------------------------- -__activate_virtualenv() { - set +o nounset - # Is virtualenv empty - if [ -z "$_VIRTUALENV_DIR" ]; then - __create_virtualenv || return 1 - # shellcheck source=/dev/null - . "${_VIRTUALENV_DIR}/bin/activate" || return 1 - echoinfo "Activated virtualenv ${_VIRTUALENV_DIR}" - fi - set -o nounset - return 0 -} # ---------- end of function __activate_virtualenv ---------- - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __install_pip_pkgs -# DESCRIPTION: Return 0 or 1 if successfully able to install pip packages. Can provide a different python version to -# install pip packages with. If $py_ver is not specified it will use the default python version. -# PARAMETERS: pkgs, py_ver -#---------------------------------------------------------------------------------------------------------------------- - -__install_pip_pkgs() { - _pip_pkgs="$1" - _py_exe="$2" - _py_pkg=$(echo "$_py_exe" | sed -E "s/\\.//g") - _pip_cmd="${_py_exe} -m pip" - - if [ "${_py_exe}" = "" ]; then - _py_exe='python' - fi - - __check_pip_allowed - - # Install pip and pip dependencies - if ! 
__check_command_exists "${_pip_cmd} --version"; then - __PACKAGES="${_py_pkg}-setuptools ${_py_pkg}-pip gcc" - # shellcheck disable=SC2086 - if [ "$DISTRO_NAME_L" = "debian" ] || [ "$DISTRO_NAME_L" = "ubuntu" ];then - __PACKAGES="${__PACKAGES} ${_py_pkg}-dev" - __apt_get_install_noinput ${__PACKAGES} || return 1 - else - __PACKAGES="${__PACKAGES} ${_py_pkg}-devel" - if [ "$DISTRO_NAME_L" = "fedora" ];then - __dnf_install_noinput ${__PACKAGES} || return 1 - else - __yum_install_noinput ${__PACKAGES} || return 1 - fi - fi - - fi - - echoinfo "Installing pip packages: ${_pip_pkgs} using ${_py_exe}" - # shellcheck disable=SC2086 - ${_pip_cmd} install ${_pip_pkgs} || return 1 -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __install_tornado_pip -# PARAMETERS: python executable -# DESCRIPTION: Return 0 or 1 if successfully able to install tornado<5.0 -#---------------------------------------------------------------------------------------------------------------------- -__install_tornado_pip() { - # OS needs tornado <5.0 from pip - __check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt on Python 3" - ## install pip if its not installed and install tornado - __install_pip_pkgs "tornado<5.0" "${1}" || return 1 -} - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __install_pip_deps -# DESCRIPTION: Return 0 or 1 if successfully able to install pip packages via requirements file -# PARAMETERS: requirements_file -#---------------------------------------------------------------------------------------------------------------------- -__install_pip_deps() { - # Install virtualenv to system pip before activating virtualenv if thats going to be used - # We assume pip pkg is installed since that is distro specific - if [ "$_VIRTUALENV_DIR" != "null" ]; then - if ! 
__check_command_exists pip; then - echoerror "Pip not installed: required for -a installs" - exit 1 - fi - pip install -U virtualenv - __activate_virtualenv || return 1 - else - echoerror "Must have virtualenv dir specified for -a installs" - fi - - requirements_file=$1 - if [ ! -f "${requirements_file}" ]; then - echoerror "Requirements file: ${requirements_file} cannot be found, needed for -a (pip pkg) installs" - exit 1 - fi - - __PIP_PACKAGES='' - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - # shellcheck disable=SC2089 - __PIP_PACKAGES="${__PIP_PACKAGES} 'apache-libcloud>=$_LIBCLOUD_MIN_VERSION'" - fi - - # shellcheck disable=SC2086,SC2090 - pip install -U -r ${requirements_file} ${__PIP_PACKAGES} -} # ---------- end of function __install_pip_deps ---------- - -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __install_salt_from_repo_post_neon -# DESCRIPTION: Return 0 or 1 if successfully able to install. Can provide a different python version to -# install pip packages with. If $py_exe is not specified it will use the default python version. -# PARAMETERS: py_exe -#---------------------------------------------------------------------------------------------------------------------- -__install_salt_from_repo_post_neon() { - _py_exe="$1" - - if [ "${_py_exe}" = "" ]; then - _py_exe='python' - fi - - echodebug "__install_salt_from_repo_post_neon py_exe=$_py_exe" - - _py_version=$(${_py_exe} -c "import sys; print('{0}.{1}'.format(*sys.version_info))") - _pip_cmd="pip${_py_version}" - if ! __check_command_exists "${_pip_cmd}"; then - echodebug "The pip binary '${_pip_cmd}' was not found in PATH" - _pip_cmd="pip$(echo "${_py_version}" | cut -c -1)" - if ! __check_command_exists "${_pip_cmd}"; then - echodebug "The pip binary '${_pip_cmd}' was not found in PATH" - _pip_cmd="pip" - if ! 
__check_command_exists "${_pip_cmd}"; then - echoerror "Unable to find a pip binary" - return 1 - fi - fi - fi - - __check_pip_allowed - - echodebug "Installed pip version: $(${_pip_cmd} --version)" - - CHECK_PIP_VERSION_SCRIPT=$(cat << EOM -import sys -try: - import pip - installed_pip_version=tuple([int(part.strip()) for part in pip.__version__.split('.') if part.isdigit()]) - desired_pip_version=($(echo ${_MINIMUM_PIP_VERSION} | sed 's/\./, /g' )) - if installed_pip_version < desired_pip_version: - print('Desired pip version {!r} > Installed pip version {!r}'.format('.'.join(map(str, desired_pip_version)), '.'.join(map(str, installed_pip_version)))) - sys.exit(1) - print('Desired pip version {!r} < Installed pip version {!r}'.format('.'.join(map(str, desired_pip_version)), '.'.join(map(str, installed_pip_version)))) - sys.exit(0) -except ImportError: - print('Failed to import pip') - sys.exit(1) -EOM -) - if ! ${_py_exe} -c "$CHECK_PIP_VERSION_SCRIPT"; then - # Upgrade pip to at least 1.2 which is when we can start using "python -m pip" - if [ "${_py_version}" = "3.5" ]; then - echodebug "Running '${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} pip>=${_MINIMUM_PIP_VERSION},<21.0'" - ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} -v "pip>=${_MINIMUM_PIP_VERSION},<21.0" - else - echodebug "Running '${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} pip>=${_MINIMUM_PIP_VERSION}'" - ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} -v "pip>=${_MINIMUM_PIP_VERSION}" - fi - sleep 1 - echodebug "PATH: ${PATH}" - _pip_cmd="pip${_py_version}" - if ! __check_command_exists "${_pip_cmd}"; then - echodebug "The pip binary '${_pip_cmd}' was not found in PATH" - _pip_cmd="pip$(echo "${_py_version}" | cut -c -1)" - if ! __check_command_exists "${_pip_cmd}"; then - echodebug "The pip binary '${_pip_cmd}' was not found in PATH" - _pip_cmd="pip" - if ! 
__check_command_exists "${_pip_cmd}"; then - echoerror "Unable to find a pip binary" - return 1 - fi - fi - fi - echodebug "Installed pip version: $(${_pip_cmd} --version)" - fi - - _setuptools_dep="setuptools>=${_MINIMUM_SETUPTOOLS_VERSION}" - if [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - # We also lock setuptools to <45 which is the latest release to support both py2 and py3 - _setuptools_dep="${_setuptools_dep},<45" - fi - - echodebug "Running '${_pip_cmd} install wheel ${_setuptools_dep}'" - ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} wheel "${_setuptools_dep}" - - echoinfo "Installing salt using ${_py_exe}" - cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - - mkdir /tmp/git/deps - echoinfo "Downloading Salt Dependencies from PyPi" - echodebug "Running '${_pip_cmd} download -d /tmp/git/deps .'" - ${_pip_cmd} download -d /tmp/git/deps . || (echo "Failed to download salt dependencies" && return 1) - - echoinfo "Installing Downloaded Salt Dependencies" - echodebug "Running '${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/*'" - ${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/* || return 1 - rm -f /tmp/git/deps/* - - echoinfo "Building Salt Python Wheel" - - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - SETUP_PY_INSTALL_ARGS="-v" - fi - - echodebug "Running '${_py_exe} setup.py --salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} bdist_wheel'" - ${_py_exe} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} bdist_wheel || return 1 - mv dist/salt*.whl /tmp/git/deps/ || return 1 - - cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" || return 1 - - echoinfo "Installing Built Salt Wheel" - ${_pip_cmd} uninstall --yes salt 2>/dev/null || true - echodebug "Running '${_pip_cmd} install --no-deps --force-reinstall ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/salt*.whl'" - ${_pip_cmd} install --no-deps --force-reinstall \ - 
${_POST_NEON_PIP_INSTALL_ARGS} \ - --global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \ - /tmp/git/deps/salt*.whl || return 1 - - echoinfo "Checking if Salt can be imported using ${_py_exe}" - CHECK_SALT_SCRIPT=$(cat << EOM -import os -import sys -try: - import salt - import salt.version - print('\nInstalled Salt Version: {}'.format(salt.version.__version__)) - print('Installed Salt Package Path: {}\n'.format(os.path.dirname(salt.__file__))) - sys.exit(0) -except ImportError: - print('\nFailed to import salt\n') - sys.exit(1) -EOM -) - if ! ${_py_exe} -c "$CHECK_SALT_SCRIPT"; then - return 1 - fi - return 0 -} # ---------- end of function __install_salt_from_repo_post_neon ---------- - - -if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - if [ "x${_PY_MAJOR_VERSION}" = "x" ]; then - # Default to python 2 for pre Neon installs - _PY_MAJOR_VERSION=2 - fi -else - if [ "x${_PY_MAJOR_VERSION}" = "x" ]; then - # Default to python 3 for post Neon install - _PY_MAJOR_VERSION=3 - fi -fi - -####################################################################################################################### -# -# Distribution install functions -# -# In order to install salt for a distribution you need to define: -# -# To Install Dependencies, which is required, one of: -# 1. install____deps -# 2. install_____deps -# 3. install___deps -# 4 install____deps -# 5. install___deps -# 6. install__deps -# -# Optionally, define a salt configuration function, which will be called if -# the -c (config-dir) option is passed. One of: -# 1. config____salt -# 2. config_____salt -# 3. config___salt -# 4 config____salt -# 5. config___salt -# 6. config__salt -# 7. config_salt [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] -# -# Optionally, define a salt master pre-seed function, which will be called if -# the -k (pre-seed master keys) option is passed. One of: -# 1. preseed____master -# 2. preseed_____master -# 3. 
preseed___master -# 4 preseed____master -# 5. preseed___master -# 6. preseed__master -# 7. preseed_master [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] -# -# To install salt, which, of course, is required, one of: -# 1. install___ -# 2. install____ -# 3. install__ -# -# Optionally, define a post install function, one of: -# 1. install____post -# 2. install_____post -# 3. install___post -# 4 install____post -# 5. install___post -# 6. install__post -# -# Optionally, define a start daemons function, one of: -# 1. install____restart_daemons -# 2. install_____restart_daemons -# 3. install___restart_daemons -# 4 install____restart_daemons -# 5. install___restart_daemons -# 6. install__restart_daemons -# -# NOTE: The start daemons function should be able to restart any daemons -# which are running, or start if they're not running. -# -# Optionally, define a daemons running function, one of: -# 1. daemons_running___ -# 2. daemons_running____ -# 3. daemons_running__ -# 4 daemons_running___ -# 5. daemons_running__ -# 6. daemons_running_ -# 7. daemons_running [THIS ONE IS ALREADY DEFINED AS THE DEFAULT] -# -# Optionally, check enabled Services: -# 1. install____check_services -# 2. install_____check_services -# 3. install___check_services -# 4 install____check_services -# 5. install___check_services -# 6. 
install__check_services -# -####################################################################################################################### - - -####################################################################################################################### -# -# Ubuntu Install Functions -# -__enable_universe_repository() { - if [ "$(grep -R universe /etc/apt/sources.list /etc/apt/sources.list.d/ | grep -v '#')" != "" ]; then - # The universe repository is already enabled - return 0 - fi - - echodebug "Enabling the universe repository" - - add-apt-repository -y "deb http://archive.ubuntu.com/ubuntu $(lsb_release -sc) universe" || return 1 - - return 0 -} - -__install_saltstack_ubuntu_repository() { - # Workaround for latest non-LTS Ubuntu - if { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 21 ] && [ "$DISTRO_MINOR_VERSION" -eq 04 ]; }; then - echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." 
- UBUNTU_VERSION=20.04 - UBUNTU_CODENAME="focal" - else - UBUNTU_VERSION=${DISTRO_VERSION} - UBUNTU_CODENAME=${DISTRO_CODENAME} - fi - - # Install downloader backend for GPG keys fetching - __PACKAGES='wget' - - # Required as it is not installed by default on Ubuntu 18+ - if [ "$DISTRO_MAJOR_VERSION" -ge 18 ]; then - __PACKAGES="${__PACKAGES} gnupg" - fi - - # Make sure https transport is available - if [ "$HTTP_VAL" = "https" ] ; then - __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" - fi - - # shellcheck disable=SC2086,SC2090 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - __PY_VERSION_REPO="apt" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - # SaltStack's stable Ubuntu repository: - SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${STABLE_REV}" - echo "$__REPO_ARCH_DEB $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/salt.list - - __apt_key_fetch "$SALTSTACK_UBUNTU_URL/salt-archive-keyring.gpg" || return 1 - - __wait_for_apt apt-get update || return 1 -} - -install_ubuntu_deps() { - if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then - # Install add-apt-repository - if ! 
__check_command_exists add-apt-repository; then - __apt_get_install_noinput software-properties-common || return 1 - fi - - __enable_universe_repository || return 1 - - __wait_for_apt apt-get update || return 1 - fi - - __PACKAGES='' - - if [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then - # Minimal systems might not have upstart installed, install it - __PACKAGES="upstart" - fi - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - PY_PKG_VER=3 - else - PY_PKG_VER="" - fi - - if [ "$DISTRO_MAJOR_VERSION" -ge 16 ] && [ -z "$_PY_EXE" ]; then - __PACKAGES="${__PACKAGES} python2.7" - fi - - if [ "$_VIRTUALENV_DIR" != "null" ]; then - __PACKAGES="${__PACKAGES} python-virtualenv" - fi - # Need python-apt for managing packages via Salt - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apt" - - # requests is still used by many salt modules - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-requests" - - # YAML module is used for generating custom master/minion configs - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" - - # Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813 - __PACKAGES="${__PACKAGES} procps pciutils" - - # shellcheck disable=SC2086,SC2090 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - -install_ubuntu_stable_deps() { - if [ "${_SLEEP}" -eq "${__DEFAULT_SLEEP}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then - # The user did not pass a custom sleep value as an argument, let's increase the default value - echodebug "On Ubuntu systems we increase the default sleep value to 10." - echodebug "See https://github.com/saltstack/salt/issues/12248 for more info." 
- _SLEEP=10 - fi - - if [ $_START_DAEMONS -eq $BS_FALSE ]; then - echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." - fi - - # No user interaction, libc6 restart services for example - export DEBIAN_FRONTEND=noninteractive - - __wait_for_apt apt-get update || return 1 - - if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then - if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ]; then - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 - else - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && - apt-key update && apt-get update || return 1 - fi - fi - - __apt_get_upgrade_noinput || return 1 - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then - __check_dpkg_architecture || return 1 - __install_saltstack_ubuntu_repository || return 1 - fi - - install_ubuntu_deps || return 1 -} - -install_ubuntu_git_deps() { - __wait_for_apt apt-get update || return 1 - - if ! __check_command_exists git; then - __apt_get_install_noinput git-core || return 1 - fi - - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then - __apt_get_install_noinput ca-certificates - fi - - __git_clone_and_checkout || return 1 - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - PY_PKG_VER=3 - else - PY_PKG_VER="" - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - - __PACKAGES="" - - # See how we are installing packages - if [ "${_PIP_ALL}" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python-dev swig libssl-dev libzmq3 libzmq3-dev" - - if ! 
__check_command_exists pip; then - __PACKAGES="${__PACKAGES} python-setuptools python-pip" - fi - - # Get just the apt packages that are required to build all the pythons - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - # Install the pythons from requirements (only zmq for now) - __install_pip_deps "${_SALT_GIT_CHECKOUT_DIR}/requirements/zeromq.txt" || return 1 - else - install_ubuntu_stable_deps || return 1 - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python3-setuptools" - else - # There is no m2crypto package for Py3 at this time - only install for Py2 - __PACKAGES="${__PACKAGES} python-m2crypto" - fi - - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-zmq" - __PACKAGES="${__PACKAGES} python-concurrent.futures" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - # Install python-libcloud if asked to - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" - fi - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - fi - else - __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_ubuntu_stable() { - __PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" - 
fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-syndic" - fi - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - -install_ubuntu_git() { - # Activate virtualenv before install - if [ "${_VIRTUALENV_DIR}" != "null" ]; then - __activate_virtualenv || return 1 - fi - - if [ -n "$_PY_EXE" ]; then - _PYEXE=${_PY_EXE} - else - _PYEXE=python2.7 - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - # We can use --prefix on debian based ditributions - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python3/dist-packages --install-option=--install-scripts=/usr/bin" - else - _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python2.7/dist-packages --install-option=--install-scripts=/usr/bin" - fi - _POST_NEON_PIP_INSTALL_ARGS="" - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service - return 0 - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - # shellcheck disable=SC2086 - "${_PYEXE}" setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 - else - # shellcheck disable=SC2086 - "${_PYEXE}" setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 - fi - - return 0 -} - -install_ubuntu_stable_post() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ]; then - # Using systemd - /bin/systemctl 
is-enabled salt-$fname.service > /dev/null 2>&1 || ( - /bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && - /bin/systemctl enable salt-$fname.service > /dev/null 2>&1 - ) - sleep 1 - /bin/systemctl daemon-reload - elif [ -f /etc/init.d/salt-$fname ]; then - update-rc.d salt-$fname defaults - fi - done - - return 0 -} - -install_ubuntu_git_post() { - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) - sleep 1 - systemctl daemon-reload - elif [ -f /sbin/initctl ]; then - _upstart_conf="/etc/init/salt-$fname.conf" - # We have upstart support - echodebug "There's upstart support" - if [ ! 
-f $_upstart_conf ]; then - # upstart does not know about our service, let's copy the proper file - echowarn "Upstart does not appear to know about salt-$fname" - echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart to $_upstart_conf" - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.upstart" "$_upstart_conf" - # Set service to know about virtualenv - if [ "${_VIRTUALENV_DIR}" != "null" ]; then - echo "SALT_USE_VIRTUALENV=${_VIRTUALENV_DIR}" > /etc/default/salt-${fname} - fi - /sbin/initctl reload-configuration || return 1 - fi - # No upstart support in Ubuntu!? - elif [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" ]; then - echodebug "There's NO upstart support!?" - echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init to /etc/init.d/salt-$fname" - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.init" "/etc/init.d/salt-$fname" - chmod +x /etc/init.d/salt-$fname - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - update-rc.d salt-$fname defaults - else - echoerror "Neither upstart nor init.d was setup for salt-$fname" - fi - done - - return 0 -} - -install_ubuntu_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - # Ensure upstart configs / systemd units are loaded - if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then - systemctl daemon-reload - elif [ -f /sbin/initctl ]; then - /sbin/initctl reload-configuration - fi - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" 
-ge 16 ]; then - echodebug "There's systemd support while checking salt-$fname" - systemctl stop salt-$fname > /dev/null 2>&1 - systemctl start salt-$fname.service && continue - # We failed to start the service, let's test the SysV code below - echodebug "Failed to start salt-$fname using systemd" - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - systemctl status salt-$fname.service - journalctl -xe - fi - fi - - if [ -f /sbin/initctl ]; then - echodebug "There's upstart support while checking salt-$fname" - - if status salt-$fname 2>/dev/null | grep -q running; then - stop salt-$fname || (echodebug "Failed to stop salt-$fname" && return 1) - fi - - start salt-$fname && continue - # We failed to start the service, let's test the SysV code below - echodebug "Failed to start salt-$fname using Upstart" - fi - - if [ ! -f /etc/init.d/salt-$fname ]; then - echoerror "No init.d support for salt-$fname was found" - return 1 - fi - - /etc/init.d/salt-$fname stop > /dev/null 2>&1 - /etc/init.d/salt-$fname start - done - - return 0 -} - -install_ubuntu_check_services() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then - __check_services_systemd salt-$fname || return 1 - elif [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then - __check_services_upstart salt-$fname || return 1 - elif [ -f /etc/init.d/salt-$fname ]; then - __check_services_debian salt-$fname || return 1 - fi - done - - return 0 -} -# -# End of Ubuntu Install Functions -# 
-####################################################################################################################### - -####################################################################################################################### -# -# Debian Install Functions -# -__install_saltstack_debian_repository() { - if [ "$DISTRO_MAJOR_VERSION" -eq 11 ]; then - # Packages for Debian 11 at repo.saltproject.io are not yet available - # Set up repository for Debian 10 for Debian 11 for now until support - # is available at repo.saltproject.io for Debian 11. - echowarn "Debian 11 distribution detected, but stable packages requested. Trying packages from Debian 10. You may experience problems." - DEBIAN_RELEASE="10" - DEBIAN_CODENAME="buster" - else - DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" - DEBIAN_CODENAME="$DISTRO_CODENAME" - fi - - __PY_VERSION_REPO="apt" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - # Install downloader backend for GPG keys fetching - __PACKAGES='wget' - - # Required as it is not installed by default on Debian 9+ - if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then - __PACKAGES="${__PACKAGES} gnupg2" - fi - - # Make sure https transport is available - if [ "$HTTP_VAL" = "https" ] ; then - __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" - fi - - # shellcheck disable=SC2086,SC2090 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location - SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${STABLE_REV}" - echo "$__REPO_ARCH_DEB $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/salt.list" - - __apt_key_fetch "$SALTSTACK_DEBIAN_URL/salt-archive-keyring.gpg" || return 1 - - __wait_for_apt apt-get update || return 1 -} - -install_debian_deps() { - if [ $_START_DAEMONS -eq $BS_FALSE ]; then - echowarn "Not starting daemons on 
Debian based distributions is not working mostly because starting them is the default behaviour." - fi - - # No user interaction, libc6 restart services for example - export DEBIAN_FRONTEND=noninteractive - - __wait_for_apt apt-get update || return 1 - - if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then - # Try to update GPG keys first if allowed - if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - if [ "$DISTRO_MAJOR_VERSION" -ge 10 ]; then - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 - else - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && - apt-key update && apt-get update || return 1 - fi - fi - - __apt_get_upgrade_noinput || return 1 - fi - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - PY_PKG_VER=3 - else - PY_PKG_VER="" - fi - - # Additionally install procps and pciutils which allows for Docker bootstraps. See 366#issuecomment-39666813 - __PACKAGES='procps pciutils' - - # YAML module is used for generating custom master/minion configs - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then - __check_dpkg_architecture || return 1 - __install_saltstack_debian_repository || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - -install_debian_git_pre() { - if ! 
__check_command_exists git; then - __apt_get_install_noinput git || return 1 - fi - - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then - __apt_get_install_noinput ca-certificates - fi - - __git_clone_and_checkout || return 1 - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi -} - -install_debian_git_deps() { - install_debian_deps || return 1 - install_debian_git_pre || return 1 - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - PY_PKG_VER=3 - else - PY_PKG_VER="" - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - __PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-backports.ssl-match-hostname" - __PACKAGES="${__PACKAGES} python-crypto python-jinja2 python-msgpack python-m2crypto" - __PACKAGES="${__PACKAGES} python-requests python-tornado python-yaml python-zmq" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - # Install python-libcloud if asked to - __PACKAGES="${__PACKAGES} python-libcloud" - fi - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - else - __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" - echodebug "install_debian_git_deps() Installing ${__PACKAGES}" - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - fi - - return 0 -} - -install_debian_7_git_deps() { - install_debian_deps || return 1 - install_debian_git_deps || return 1 - - return 0 -} - -install_debian_8_git_deps() { - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - echodebug "CALLING install_debian_git_deps" - install_debian_git_deps || return 1 - return 0 - fi - - install_debian_deps || return 1 - - if ! 
__check_command_exists git; then - __apt_get_install_noinput git || return 1 - fi - - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then - __apt_get_install_noinput ca-certificates - fi - - __git_clone_and_checkout || return 1 - - __PACKAGES="libzmq3 libzmq3-dev lsb-release python-apt python-crypto python-jinja2" - __PACKAGES="${__PACKAGES} python-m2crypto python-msgpack python-requests python-systemd" - __PACKAGES="${__PACKAGES} python-yaml python-zmq python-concurrent.futures" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - # Install python-libcloud if asked to - __PACKAGES="${__PACKAGES} python-libcloud" - fi - - __PIP_PACKAGES='' - if (__check_pip_allowed >/dev/null 2>&1); then - __PIP_PACKAGES='tornado<5.0' - # Install development environment for building tornado Python module - __PACKAGES="${__PACKAGES} build-essential python-dev" - - if ! __check_command_exists pip; then - __PACKAGES="${__PACKAGES} python-pip" - fi - # Attempt to configure backports repo on non-x86_64 system - elif [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DPKG_ARCHITECTURE" != "amd64" ]; then - # Check if Debian Backports repo already configured - if ! 
apt-cache policy | grep -q 'Debian Backports'; then - echo 'deb http://httpredir.debian.org/debian jessie-backports main' > \ - /etc/apt/sources.list.d/backports.list - fi - - __wait_for_apt apt-get update || return 1 - - # python-tornado package should be installed from backports repo - __PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado/jessie-backports" - else - __PACKAGES="${__PACKAGES} python-backports.ssl-match-hostname python-tornado" - fi - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - if [ "${__PIP_PACKAGES}" != "" ]; then - # shellcheck disable=SC2086,SC2090 - pip install -U ${__PIP_PACKAGES} || return 1 - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_debian_9_git_deps() { - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - install_debian_git_deps || return 1 - return 0 - fi - - install_debian_deps || return 1 - install_debian_git_pre || return 1 - - __PACKAGES="libzmq5 lsb-release" - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - PY_PKG_VER=3 - else - PY_PKG_VER="" - - # These packages are PY2-ONLY - __PACKAGES="${__PACKAGES} python-backports-abc python-m2crypto python-concurrent.futures" - fi - - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apt python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-systemd" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml python${PY_PKG_VER}-zmq" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - # Install python-libcloud if asked to - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" - fi - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - -install_debian_10_git_deps() { - - if [ 
"${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - install_debian_git_deps || return 1 - return 0 - fi - - install_debian_deps || return 1 - install_debian_git_pre || return 1 - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - _py=${_PY_EXE} - PY_PKG_VER=3 - __PACKAGES="python${PY_PKG_VER}-distutils" - else - _py="python" - PY_PKG_VER="" - __PACKAGES="" - fi - - __install_tornado_pip ${_py}|| return 1 - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-jinja2" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-yaml python${PY_PKG_VER}-zmq" - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - -install_debian_stable() { - __PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-syndic" - fi - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - -install_debian_7_stable() { - install_debian_stable || return 1 - return 0 -} - -install_debian_8_stable() { - install_debian_stable || return 1 - return 0 -} - -install_debian_9_stable() { - install_debian_stable || return 1 - return 0 -} - -install_debian_git() { - if [ -n "$_PY_EXE" ]; then - _PYEXE=${_PY_EXE} - else - _PYEXE=python - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - # We can use --prefix on debian based ditributions - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python3/dist-packages --install-option=--install-scripts=/usr/bin" - else - _POST_NEON_PIP_INSTALL_ARGS="--target=/usr/lib/python2.7/dist-packages --install-option=--install-scripts=/usr/bin" - fi - 
_POST_NEON_PIP_INSTALL_ARGS="" - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service - return 0 - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - # shellcheck disable=SC2086 - "${_PYEXE}" setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 - else - # shellcheck disable=SC2086 - "${_PYEXE}" setup.py ${SETUP_PY_INSTALL_ARGS} install --install-layout=deb || return 1 - fi -} - -install_debian_7_git() { - install_debian_git || return 1 - return 0 -} - -install_debian_8_git() { - install_debian_git || return 1 - return 0 -} - -install_debian_9_git() { - install_debian_git || return 1 - return 0 -} - -install_debian_git_post() { - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ "$fname" = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ "$fname" = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ "$fname" = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ "$fname" = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - # Configure SystemD for Debian 8 "Jessie" and later - if [ -f /bin/systemctl ]; then - if [ ! 
-f /lib/systemd/system/salt-${fname}.service ] || \ - { [ -f /lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]; }; then - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}" - else - # workaround before adding Debian-specific unit files to the Salt main repo - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system - sed -i -e '/^Type/ s/notify/simple/' /lib/systemd/system/salt-${fname}.service - fi - fi - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ "$fname" = "api" ] && continue - - /bin/systemctl enable "salt-${fname}.service" - SYSTEMD_RELOAD=$BS_TRUE - - # Install initscripts for Debian 7 "Wheezy" - elif [ ! -f "/etc/init.d/salt-$fname" ] || \ - { [ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.init" "/etc/init.d/salt-${fname}" - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/deb/salt-${fname}.environment" "/etc/default/salt-${fname}" - - if [ ! -f "/etc/init.d/salt-${fname}" ]; then - echowarn "The init script for salt-${fname} was not found, skipping it..." 
- continue - fi - - chmod +x "/etc/init.d/salt-${fname}" - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ "$fname" = "api" ] && continue - - update-rc.d "salt-${fname}" defaults - fi - done -} - -install_debian_restart_daemons() { - [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ]; then - # Debian 8 uses systemd - /bin/systemctl stop salt-$fname > /dev/null 2>&1 - /bin/systemctl start salt-$fname.service && continue - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - systemctl status salt-$fname.service - journalctl -xe - fi - elif [ -f /etc/init.d/salt-$fname ]; then - # Still in SysV init - /etc/init.d/salt-$fname stop > /dev/null 2>&1 - /etc/init.d/salt-$fname start - fi - done -} - -install_debian_check_services() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ]; then - __check_services_systemd salt-$fname || return 1 - elif [ -f /etc/init.d/salt-$fname ]; then - __check_services_debian salt-$fname || return 1 - fi - done - return 0 -} -# -# Ended Debian Install Functions -# 
-####################################################################################################################### - -####################################################################################################################### -# -# Fedora Install Functions -# - -install_fedora_deps() { - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - dnf -y update || return 1 - fi - - __PACKAGES="${__PACKAGES:=}" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -lt 3 ]; then - echoerror "There are no Python 2 stable packages for Fedora, only Py3 packages" - return 1 - fi - - # Salt on Fedora is Py3 - PY_PKG_VER=3 - - __PACKAGES="${__PACKAGES} dnf-utils libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip python${PY_PKG_VER}-m2crypto python${PY_PKG_VER}-pyyaml" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd" - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - fi - - # shellcheck disable=SC2086 - __dnf_install_noinput ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1 - - return 0 -} - -install_fedora_stable() { - if [ "$STABLE_REV" = "latest" ]; then - __SALT_VERSION="" - else - __SALT_VERSION="$(dnf list --showduplicates salt | grep "$STABLE_REV" | head -n 1 | awk '{print $2}')" - if [ "x${__SALT_VERSION}" = "x" ]; then - echoerror "Could not find a stable install for Salt ${STABLE_REV}" - exit 1 - fi - echoinfo "Installing Stable Package Version ${__SALT_VERSION}" - __SALT_VERSION="-${__SALT_VERSION}" - fi - __PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud${__SALT_VERSION}" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-master${__SALT_VERSION}" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - 
__PACKAGES="${__PACKAGES} salt-minion${__SALT_VERSION}" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-syndic${__SALT_VERSION}" - fi - - # shellcheck disable=SC2086 - __dnf_install_noinput ${__PACKAGES} || return 1 - - __python="python3" - if ! __check_command_exists python3; then - echoerror "Could not find a python3 binary?!" - return 1 - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - __check_pip_allowed "You need to allow pip based installations (-P) for Tornado <5.0 in order to install Salt" - __installed_tornado_rpm=$(rpm -qa | grep python${PY_PKG_VER}-tornado) - if [ -n "${__installed_tornado_rpm}" ]; then - echodebug "Removing system package ${__installed_tornado_rpm}" - rpm -e --nodeps "${__installed_tornado_rpm}" || return 1 - fi - __get_site_packages_dir_code=$(cat << EOM -import site -print([d for d in site.getsitepackages() if d.startswith('/usr/lib/python')][0]) -EOM -) - __target_path=$(${__python} -c "${__get_site_packages_dir_code}") - echodebug "Running '${__python}' -m pip install --target ${__target_path} 'tornado<5.0'" - "${__python}" -m pip install --target "${__target_path}" "tornado<5" || return 1 - fi - - return 0 -} - -install_fedora_stable_post() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) - sleep 1 - systemctl daemon-reload - done -} - -install_fedora_git_deps() { - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - # Packages are named python3- - PY_PKG_VER=3 - else 
- PY_PKG_VER=2 - fi - - __PACKAGES="" - if ! __check_command_exists ps; then - __PACKAGES="${__PACKAGES} procps-ng" - fi - if ! __check_command_exists git; then - __PACKAGES="${__PACKAGES} git" - fi - - if [ -n "${__PACKAGES}" ]; then - # shellcheck disable=SC2086 - __dnf_install_noinput ${__PACKAGES} || return 1 - __PACKAGES="" - fi - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then - __PACKAGES="${__PACKAGES} ca-certificates" - fi - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr" - fi - - install_fedora_deps || return 1 - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - if __check_command_exists python3; then - __python="python3" - fi - elif [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - if __check_command_exists python2; then - __python="python2" - fi - else - if ! __check_command_exists python; then - echoerror "Unable to find a python binary?!" 
- return 1 - fi - # Let's hope it's the right one - __python="python" - fi - - grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS=' - ' read -r dep; do - echodebug "Running '${__python}' -m pip install '${dep}'" - "${__python}" -m pip install "${dep}" || return 1 - done - else - __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" - # shellcheck disable=SC2086 - __dnf_install_noinput ${__PACKAGES} || return 1 - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_fedora_git() { - if [ "${_PY_EXE}" != "" ]; then - _PYEXE=${_PY_EXE} - echoinfo "Using the following python version: ${_PY_EXE} to install salt" - else - _PYEXE='python2' - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 - else - ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 - fi - return 0 -} - -install_fedora_git_post() { - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! 
__check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) - sleep 1 - systemctl daemon-reload - done -} - -install_fedora_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - systemctl stop salt-$fname > /dev/null 2>&1 - systemctl start salt-$fname.service && continue - echodebug "Failed to start salt-$fname using systemd" - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - systemctl status salt-$fname.service - journalctl -xe - fi - done -} - -install_fedora_check_services() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - __check_services_systemd salt-$fname || 
return 1 - done - - return 0 -} -# -# Ended Fedora Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# CentOS Install Functions -# -__install_epel_repository() { - if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_TRUE ]; then - return 0 - fi - - # Check if epel repo is already enabled and flag it accordingly - if yum repolist | grep -q "^[!]\\?${_EPEL_REPO}/"; then - _EPEL_REPOS_INSTALLED=$BS_TRUE - return 0 - fi - - # Download latest 'epel-release' package for the distro version directly - epel_repo_url="${HTTP_VAL}://dl.fedoraproject.org/pub/epel/epel-release-latest-${DISTRO_MAJOR_VERSION}.noarch.rpm" - rpm -Uvh --force "$epel_repo_url" || return 1 - - _EPEL_REPOS_INSTALLED=$BS_TRUE - - return 0 -} - -__install_saltstack_rhel_repository() { - if [ "$ITYPE" = "stable" ]; then - repo_rev="$STABLE_REV" - else - repo_rev="latest" - fi - - __PY_VERSION_REPO="yum" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - # Avoid using '$releasever' variable for yum. - # Instead, this should work correctly on all RHEL variants. - base_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${repo_rev}/" - gpg_key="SALTSTACK-GPG-KEY.pub" - repo_file="/etc/yum.repos.d/salt.repo" - - if [ ! 
-s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then - cat <<_eof > "$repo_file" -[saltstack] -name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever -baseurl=${base_url} -skip_if_unavailable=True -gpgcheck=1 -gpgkey=${base_url}${gpg_key} -enabled=1 -enabled_metadata=1 -_eof - - fetch_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/" - __rpm_import_gpg "${fetch_url}${gpg_key}" || return 1 - yum clean metadata || return 1 - elif [ "$repo_rev" != "latest" ]; then - echowarn "salt.repo already exists, ignoring salt version argument." - echowarn "Use -F (forced overwrite) to install $repo_rev." - fi - - return 0 -} - -install_centos_stable_deps() { - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - yum -y update || return 1 - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - echowarn "Detected -r or -R option while installing Salt packages for Python 3." - echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." - echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then - __install_epel_repository || return 1 - __install_saltstack_rhel_repository || return 1 - fi - - # If -R was passed, we need to configure custom repo url with rsync-ed packages - # Which is still handled in __install_saltstack_rhel_repository. This call has - # its own check in case -r was passed without -R. 
- if [ "$_CUSTOM_REPO_URL" != "null" ]; then - __install_saltstack_rhel_repository || return 1 - fi - - if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then - __PACKAGES="dnf-utils chkconfig" - else - __PACKAGES="yum-utils chkconfig" - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then - # YAML module is used for generating custom master/minion configs - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python3-pyyaml" - else - __PACKAGES="${__PACKAGES} python2-pyyaml" - fi - elif [ "$DISTRO_MAJOR_VERSION" -eq 7 ]; then - # YAML module is used for generating custom master/minion configs - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python36-PyYAML" - else - __PACKAGES="${__PACKAGES} PyYAML" - fi - else - # YAML module is used for generating custom master/minion configs - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python34-PyYAML" - else - __PACKAGES="${__PACKAGES} PyYAML" - fi - fi - fi - - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi - - - return 0 -} - -install_centos_stable() { - __PACKAGES="" - - local cloud='salt-cloud' - local master='salt-master' - local minion='salt-minion' - local syndic='salt-syndic' - - if echo "$STABLE_REV" | grep -q "archive";then # point release being applied - local ver=$(echo "$STABLE_REV"|awk -F/ '{print $2}') # strip archive/ - elif echo "$STABLE_REV" | egrep -vq "archive|latest";then # latest or major version(3003, 3004, etc) being applie - local ver=$STABLE_REV - fi - - if [ ! 
-z $ver ]; then - cloud+="-$ver" - master+="-$ver" - minion+="-$ver" - syndic+="-$ver" - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} $minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $syndic" - fi - - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - -install_centos_stable_post() { - SYSTEMD_RELOAD=$BS_FALSE - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ]; then - /bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( - /bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && - /bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 - ) - - SYSTEMD_RELOAD=$BS_TRUE - elif [ -f "/etc/init.d/salt-${fname}" ]; then - /sbin/chkconfig salt-${fname} on - fi - done - - if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then - /bin/systemctl daemon-reload - fi - - return 0 -} - -install_centos_git_deps() { - install_centos_stable_deps || return 1 - - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then - __yum_install_noinput ca-certificates || return 1 - fi - - if ! 
__check_command_exists git; then - __yum_install_noinput git || return 1 - fi - - __git_clone_and_checkout || return 1 - - __PACKAGES="" - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then - # Packages are named python3- - PY_PKG_VER=3 - __PACKAGES="${__PACKAGES} python3" - else - # Packages are named python36- - PY_PKG_VER=36 - __PACKAGES="${__PACKAGES} python36" - fi - else - PY_PKG_VER="" - if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then - __PACKAGES="${__PACKAGES} python2" - elif [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then - PY_PKG_VER=27 - __PACKAGES="${__PACKAGES} python27" - else - __PACKAGES="${__PACKAGES} python" - fi - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - _install_m2crypto_req=false - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - _py=${_PY_EXE} - if [ "$DISTRO_MAJOR_VERSION" -gt 6 ]; then - _install_m2crypto_req=true - fi - else - if [ "$DISTRO_MAJOR_VERSION" -eq 6 ]; then - _install_m2crypto_req=true - fi - _py="python" - - # Only Py2 needs python-futures - __PACKAGES="${__PACKAGES} python-futures" - - # There is no systemd-python3 package as of this writing - if [ "$DISTRO_MAJOR_VERSION" -ge 7 ]; then - __PACKAGES="${__PACKAGES} systemd-python" - fi - fi - - if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then - __install_tornado_pip ${_py} || return 1 - __PACKAGES="${__PACKAGES} python3-m2crypto" - else - __PACKAGES="${__PACKAGES} m2crypto python${PY_PKG_VER}-crypto" - fi - - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-jinja2" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado python${PY_PKG_VER}-zmq" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud" - fi - - if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ]; then - # Install Python if "-y" was passed in. 
- __install_python || return 1 - fi - - if [ "${_PY_EXE}" != "" ] && [ "$_PIP_ALLOWED" -eq "$BS_TRUE" ]; then - # If "-x" is defined, install dependencies with pip based on the Python version given. - _PIP_PACKAGES="m2crypto!=0.33.0 jinja2 msgpack-python pycrypto PyYAML tornado<5.0 zmq futures>=2.0" - - # install swig and openssl on cent6 - if $_install_m2crypto_req; then - __yum_install_noinput openssl-devel swig || return 1 - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # Filter out any commented lines from the requirements file - _REQ_LINES="$(grep '^[^#]' "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - for SINGLE_PACKAGE in ${_PIP_PACKAGES}; do - __REQUIRED_VERSION="$(grep "${SINGLE_PACKAGE}" "${_REQ_LINES}")" - if [ "${__REQUIRED_VERSION}" != "" ]; then - _PIP_PACKAGES=$(echo "$_PIP_PACKAGES" | sed "s/${SINGLE_PACKAGE}/${__REQUIRED_VERSION}/") - fi - done - fi - - if [ "$_INSTALL_CLOUD" -eq "${BS_TRUE}" ]; then - _PIP_PACKAGES="${_PIP_PACKAGES} apache-libcloud" - fi - - __install_pip_pkgs "${_PIP_PACKAGES}" "${_PY_EXE}" || return 1 - else - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - else - if [ "${_INSTALL_PY}" -eq "${BS_TRUE}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 8 ]; then - # Install Python if "-y" was passed in. 
- __install_python || return 1 - fi - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_centos_git() { - if [ "${_PY_EXE}" != "" ]; then - _PYEXE=${_PY_EXE} - echoinfo "Using the following python version: ${_PY_EXE} to install salt" - else - _PYEXE='python2' - fi - - echodebug "_PY_EXE: $_PY_EXE" - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 - else - $_PYEXE setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 - fi - - return 0 -} - -install_centos_git_post() { - SYSTEMD_RELOAD=$BS_FALSE - - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ]; then - if [ ! -f "/usr/lib/systemd/system/salt-${fname}.service" ] || \ - { [ -f "/usr/lib/systemd/system/salt-${fname}.service" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system - fi - - SYSTEMD_RELOAD=$BS_TRUE - elif [ ! 
-f "/etc/init.d/salt-$fname" ] || \ - { [ -f "/etc/init.d/salt-$fname" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}" /etc/init.d - chmod +x /etc/init.d/salt-${fname} - fi - done - - if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then - /bin/systemctl daemon-reload - fi - - install_centos_stable_post || return 1 - - return 0 -} - -install_centos_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then - # We have upstart support and upstart knows about our service - if ! /sbin/initctl status salt-$fname > /dev/null 2>&1; then - # Everything is in place and upstart gave us an error code? Fail! - return 1 - fi - - # upstart knows about this service. - # Let's try to stop it, and then start it - /sbin/initctl stop salt-$fname > /dev/null 2>&1 - # Restart service - if ! /sbin/initctl start salt-$fname > /dev/null 2>&1; then - # Failed the restart?! 
- return 1 - fi - elif [ -f /etc/init.d/salt-$fname ]; then - # Disable stdin to fix shell session hang on killing tee pipe - service salt-$fname stop < /dev/null > /dev/null 2>&1 - service salt-$fname start < /dev/null - elif [ -f /usr/bin/systemctl ]; then - # CentOS 7 uses systemd - /usr/bin/systemctl stop salt-$fname > /dev/null 2>&1 - /usr/bin/systemctl start salt-$fname.service && continue - echodebug "Failed to start salt-$fname using systemd" - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - systemctl status salt-$fname.service - journalctl -xe - fi - fi - done -} - -install_centos_testing_deps() { - install_centos_stable_deps || return 1 - return 0 -} - -install_centos_testing() { - install_centos_stable || return 1 - return 0 -} - -install_centos_testing_post() { - install_centos_stable_post || return 1 - return 0 -} - -install_centos_check_services() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /sbin/initctl ] && [ -f /etc/init/salt-${fname}.conf ]; then - __check_services_upstart salt-$fname || return 1 - elif [ -f /etc/init.d/salt-$fname ]; then - __check_services_sysvinit salt-$fname || return 1 - elif [ -f /usr/bin/systemctl ]; then - __check_services_systemd salt-$fname || return 1 - fi - done - - return 0 -} -# -# Ended CentOS Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# RedHat Install Functions -# -install_red_hat_linux_stable_deps() { 
- install_centos_stable_deps || return 1 - return 0 -} - -install_red_hat_linux_git_deps() { - install_centos_git_deps || return 1 - return 0 -} - -install_red_hat_enterprise_stable_deps() { - install_red_hat_linux_stable_deps || return 1 - return 0 -} - -install_red_hat_enterprise_git_deps() { - install_red_hat_linux_git_deps || return 1 - return 0 -} - -install_red_hat_enterprise_linux_stable_deps() { - install_red_hat_linux_stable_deps || return 1 - return 0 -} - -install_red_hat_enterprise_linux_git_deps() { - install_red_hat_linux_git_deps || return 1 - return 0 -} - -install_red_hat_enterprise_server_stable_deps() { - install_red_hat_linux_stable_deps || return 1 - return 0 -} - -install_red_hat_enterprise_server_git_deps() { - install_red_hat_linux_git_deps || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_stable_deps() { - install_red_hat_linux_stable_deps || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_git_deps() { - install_red_hat_linux_git_deps || return 1 - return 0 -} - -install_red_hat_linux_stable() { - install_centos_stable || return 1 - return 0 -} - -install_red_hat_linux_git() { - install_centos_git || return 1 - return 0 -} - -install_red_hat_enterprise_stable() { - install_red_hat_linux_stable || return 1 - return 0 -} - -install_red_hat_enterprise_git() { - install_red_hat_linux_git || return 1 - return 0 -} - -install_red_hat_enterprise_linux_stable() { - install_red_hat_linux_stable || return 1 - return 0 -} - -install_red_hat_enterprise_linux_git() { - install_red_hat_linux_git || return 1 - return 0 -} - -install_red_hat_enterprise_server_stable() { - install_red_hat_linux_stable || return 1 - return 0 -} - -install_red_hat_enterprise_server_git() { - install_red_hat_linux_git || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_stable() { - install_red_hat_linux_stable || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_git() { - install_red_hat_linux_git || return 1 - 
return 0 -} - -install_red_hat_linux_stable_post() { - install_centos_stable_post || return 1 - return 0 -} - -install_red_hat_linux_restart_daemons() { - install_centos_restart_daemons || return 1 - return 0 -} - -install_red_hat_linux_git_post() { - install_centos_git_post || return 1 - return 0 -} - -install_red_hat_enterprise_stable_post() { - install_red_hat_linux_stable_post || return 1 - return 0 -} - -install_red_hat_enterprise_restart_daemons() { - install_red_hat_linux_restart_daemons || return 1 - return 0 -} - -install_red_hat_enterprise_git_post() { - install_red_hat_linux_git_post || return 1 - return 0 -} - -install_red_hat_enterprise_linux_stable_post() { - install_red_hat_linux_stable_post || return 1 - return 0 -} - -install_red_hat_enterprise_linux_restart_daemons() { - install_red_hat_linux_restart_daemons || return 1 - return 0 -} - -install_red_hat_enterprise_linux_git_post() { - install_red_hat_linux_git_post || return 1 - return 0 -} - -install_red_hat_enterprise_server_stable_post() { - install_red_hat_linux_stable_post || return 1 - return 0 -} - -install_red_hat_enterprise_server_restart_daemons() { - install_red_hat_linux_restart_daemons || return 1 - return 0 -} - -install_red_hat_enterprise_server_git_post() { - install_red_hat_linux_git_post || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_stable_post() { - install_red_hat_linux_stable_post || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_restart_daemons() { - install_red_hat_linux_restart_daemons || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_git_post() { - install_red_hat_linux_git_post || return 1 - return 0 -} - -install_red_hat_linux_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_red_hat_linux_testing() { - install_centos_testing || return 1 - return 0 -} - -install_red_hat_linux_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - 
-install_red_hat_enterprise_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_red_hat_enterprise_testing() { - install_centos_testing || return 1 - return 0 -} - -install_red_hat_enterprise_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_red_hat_enterprise_server_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_red_hat_enterprise_server_testing() { - install_centos_testing || return 1 - return 0 -} - -install_red_hat_enterprise_server_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_testing() { - install_centos_testing || return 1 - return 0 -} - -install_red_hat_enterprise_workstation_testing_post() { - install_centos_testing_post || return 1 - return 0 -} -# -# Ended RedHat Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# Oracle Linux Install Functions -# -install_oracle_linux_stable_deps() { - install_centos_stable_deps || return 1 - return 0 -} - -install_oracle_linux_git_deps() { - install_centos_git_deps || return 1 - return 0 -} - -install_oracle_linux_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_oracle_linux_stable() { - install_centos_stable || return 1 - return 0 -} - -install_oracle_linux_git() { - install_centos_git || return 1 - return 0 -} - -install_oracle_linux_testing() { - install_centos_testing || return 1 - return 0 -} - -install_oracle_linux_stable_post() { - install_centos_stable_post || return 1 - return 0 -} - -install_oracle_linux_git_post() { - install_centos_git_post || return 1 - return 0 -} - 
-install_oracle_linux_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_oracle_linux_restart_daemons() { - install_centos_restart_daemons || return 1 - return 0 -} - -install_oracle_linux_check_services() { - install_centos_check_services || return 1 - return 0 -} -# -# Ended Oracle Linux Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# Scientific Linux Install Functions -# -install_scientific_linux_stable_deps() { - install_centos_stable_deps || return 1 - return 0 -} - -install_scientific_linux_git_deps() { - install_centos_git_deps || return 1 - return 0 -} - -install_scientific_linux_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_scientific_linux_stable() { - install_centos_stable || return 1 - return 0 -} - -install_scientific_linux_git() { - install_centos_git || return 1 - return 0 -} - -install_scientific_linux_testing() { - install_centos_testing || return 1 - return 0 -} - -install_scientific_linux_stable_post() { - install_centos_stable_post || return 1 - return 0 -} - -install_scientific_linux_git_post() { - install_centos_git_post || return 1 - return 0 -} - -install_scientific_linux_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_scientific_linux_restart_daemons() { - install_centos_restart_daemons || return 1 - return 0 -} - -install_scientific_linux_check_services() { - install_centos_check_services || return 1 - return 0 -} -# -# Ended Scientific Linux Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# 
CloudLinux Install Functions -# -install_cloud_linux_stable_deps() { - install_centos_stable_deps || return 1 - return 0 -} - -install_cloud_linux_git_deps() { - install_centos_git_deps || return 1 - return 0 -} - -install_cloud_linux_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_cloud_linux_stable() { - install_centos_stable || return 1 - return 0 -} - -install_cloud_linux_git() { - install_centos_git || return 1 - return 0 -} - -install_cloud_linux_testing() { - install_centos_testing || return 1 - return 0 -} - -install_cloud_linux_stable_post() { - install_centos_stable_post || return 1 - return 0 -} - -install_cloud_linux_git_post() { - install_centos_git_post || return 1 - return 0 -} - -install_cloud_linux_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_cloud_linux_restart_daemons() { - install_centos_restart_daemons || return 1 - return 0 -} - -install_cloud_linux_check_services() { - install_centos_check_services || return 1 - return 0 -} -# -# End of CloudLinux Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# Alpine Linux Install Functions -# -install_alpine_linux_stable_deps() { - if ! grep -q '^[^#].\+alpine/.\+/community' /etc/apk/repositories; then - # Add community repository entry based on the "main" repo URL - __REPO=$(grep '^[^#].\+alpine/.\+/main\>' /etc/apk/repositories) - echo "${__REPO}" | sed -e 's/main/community/' >> /etc/apk/repositories - fi - - apk update - - # Get latest root CA certs - apk -U add ca-certificates - - if ! __check_command_exists openssl; then - # Install OpenSSL to be able to pull from https:// URLs - apk -U add openssl - fi -} - -install_alpine_linux_git_deps() { - install_alpine_linux_stable_deps || return 1 - - if ! 
__check_command_exists git; then - apk -U add git || return 1 - fi - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - apk -U add python2 py-virtualenv py2-crypto py2-m2crypto py2-setuptools \ - py2-jinja2 py2-yaml py2-markupsafe py2-msgpack py2-psutil \ - py2-zmq zeromq py2-requests || return 1 - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the master branch, install whichever tornado is on the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - apk -U add py2-tornado || return 1 - fi - fi - else - apk -U add python2 py2-pip py2-setuptools || return 1 - _PY_EXE=python2 - return 0 - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi -} - -install_alpine_linux_stable() { - __PACKAGES="salt" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-syndic" - fi - - # shellcheck disable=SC2086 - apk -U add ${__PACKAGES} || return 1 - return 0 -} - -install_alpine_linux_git() { - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 - else - python2 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 - fi -} - -install_alpine_linux_post() { - for fname in api master minion syndic; do - # Skip if not 
meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /sbin/rc-update ]; then - script_url="${_SALTSTACK_REPO_URL%.git}/raw/master/pkg/alpine/salt-$fname" - [ -f "/etc/init.d/salt-$fname" ] || __fetch_url "/etc/init.d/salt-$fname" "$script_url" - - # shellcheck disable=SC2181 - if [ $? -eq 0 ]; then - chmod +x "/etc/init.d/salt-$fname" - else - echoerror "Failed to get OpenRC init script for $OS_NAME from $script_url." - return 1 - fi - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - /sbin/rc-update add "salt-$fname" > /dev/null 2>&1 || return 1 - fi - done -} - -install_alpine_linux_restart_daemons() { - [ "${_START_DAEMONS}" -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - # Disable stdin to fix shell session hang on killing tee pipe - /sbin/rc-service salt-$fname stop < /dev/null > /dev/null 2>&1 - /sbin/rc-service salt-$fname start < /dev/null || return 1 - done -} - -install_alpine_linux_check_services() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ 
"$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - __check_services_openrc salt-$fname || return 1 - done - - return 0 -} - -daemons_running_alpine_linux() { - [ "${_START_DAEMONS}" -eq $BS_FALSE ] && return - - FAILED_DAEMONS=0 - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - # shellcheck disable=SC2009 - if [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then - echoerror "salt-$fname was not found running" - FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) - fi - done - - return $FAILED_DAEMONS -} - -# -# Ended Alpine Linux Install Functions -# -####################################################################################################################### - - -####################################################################################################################### -# -# Amazon Linux AMI Install Functions -# - -install_amazon_linux_ami_deps() { - # Shim to figure out if we're using old (rhel) or new (aws) rpms. 
- _USEAWS=$BS_FALSE - pkg_append="python" - - if [ "$ITYPE" = "stable" ]; then - repo_rev="$STABLE_REV" - else - repo_rev="latest" - fi - - if echo $repo_rev | grep -E -q '^archive'; then - year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) - else - year=$(echo "$repo_rev" | cut -c1-4) - fi - - if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \ - [ "$year" -gt 2016 ]; then - _USEAWS=$BS_TRUE - pkg_append="python27" - fi - - # We need to install yum-utils before doing anything else when installing on - # Amazon Linux ECS-optimized images. See issue #974. - __yum_install_noinput yum-utils - - # Do upgrade early - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - yum -y update || return 1 - fi - - if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then - __REPO_FILENAME="salt.repo" - - # Set a few vars to make life easier. - if [ $_USEAWS -eq $BS_TRUE ]; then - base_url="$HTTP_VAL://${_REPO_URL}/yum/amazon/latest/\$basearch/$repo_rev/" - gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" - repo_name="SaltStack repo for Amazon Linux" - else - base_url="$HTTP_VAL://${_REPO_URL}/yum/redhat/6/\$basearch/$repo_rev/" - gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" - repo_name="SaltStack repo for RHEL/CentOS 6" - fi - - # This should prob be refactored to use __install_saltstack_rhel_repository() - # With args passed in to do the right thing. Reformatted to be more like the - # amazon linux yum file. - if [ ! 
-s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then - cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" -[saltstack-repo] -name=$repo_name -failovermethod=priority -priority=10 -gpgcheck=1 -gpgkey=$gpg_key -baseurl=$base_url -_eof - fi - - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 - # which is already installed - __PACKAGES="m2crypto ${pkg_append}-crypto ${pkg_append}-jinja2 ${pkg_append}-PyYAML" - __PACKAGES="${__PACKAGES} ${pkg_append}-msgpack ${pkg_append}-requests ${pkg_append}-zmq" - __PACKAGES="${__PACKAGES} ${pkg_append}-futures" - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi -} - -install_amazon_linux_ami_git_deps() { - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then - yum -y install ca-certificates || return 1 - fi - - PIP_EXE='pip' - if __check_command_exists python2.7; then - if ! __check_command_exists pip2.7; then - if ! __check_command_exists easy_install-2.7; then - __yum_install_noinput python27-setuptools - fi - /usr/bin/easy_install-2.7 pip || return 1 - fi - PIP_EXE='/usr/local/bin/pip2.7' - _PY_EXE='python2.7' - fi - - install_amazon_linux_ami_deps || return 1 - - if ! 
__check_command_exists git; then - __yum_install_noinput git || return 1 - fi - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - __PACKAGES="" - __PIP_PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud" - __PACKAGES="${__PACKAGES} python27-pip" - __PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION" - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the master branch, install whichever tornado is on the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - __PACKAGES="${__PACKAGES} ${pkg_append}-tornado" - fi - fi - - if [ "${__PACKAGES}" != "" ]; then - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - if [ "${__PIP_PACKAGES}" != "" ]; then - # shellcheck disable=SC2086 - ${PIP_EXE} install ${__PIP_PACKAGES} || return 1 - fi - else - __PACKAGES="python27-pip python27-setuptools python27-devel gcc" - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_amazon_linux_ami_2_git_deps() { - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then - yum -y install ca-certificates || return 1 - fi - - install_amazon_linux_ami_2_deps || return 1 - - if [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - PY_PKG_VER=2 - PIP_EXE='/bin/pip' - else - PY_PKG_VER=3 - PIP_EXE='/bin/pip3' - fi - __PACKAGES="python${PY_PKG_VER}-pip" - - if ! __check_command_exists "${PIP_EXE}"; then - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - if ! 
__check_command_exists git; then - __yum_install_noinput git || return 1 - fi - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - - __PACKAGES="" - __PIP_PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq "$BS_TRUE" ]; then - __check_pip_allowed "You need to allow pip based installations (-P) in order to install apache-libcloud" - if [ "$PARSED_VERSION" -eq "2" ]; then - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq "3" ]; then - __PACKAGES="${__PACKAGES} python3-pip" - __PIP_PACKAGES="${__PIP_PACKAGES} tornado<$_TORNADO_MAX_PY3_VERSION" - else - __PACKAGES="${__PACKAGES} python2-pip" - fi - else - __PACKAGES="${__PACKAGES} python27-pip" - fi - __PIP_PACKAGES="${__PIP_PACKAGES} apache-libcloud>=$_LIBCLOUD_MIN_VERSION" - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the master branch, install whichever tornado is on the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq "3" ]; then - __PIP_PACKAGES="${__PIP_PACKAGES} tornado<$_TORNADO_MAX_PY3_VERSION" - else - __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-tornado" - fi - fi - fi - - if [ "${__PIP_PACKAGES}" != "" ]; then - __check_pip_allowed "You need to allow pip based installations (-P) in order to install ${__PIP_PACKAGES}" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip" - fi - - if [ "${__PACKAGES}" != "" ]; then - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - if [ "${__PIP_PACKAGES}" != "" ]; then - # shellcheck disable=SC2086 - ${PIP_EXE} install ${__PIP_PACKAGES} || return 1 - fi - else - __PACKAGES="python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools python${PY_PKG_VER}-devel gcc" - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - # Let's trigger config_salt() - if [ 
"$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_amazon_linux_ami_2_deps() { - # Shim to figure out if we're using old (rhel) or new (aws) rpms. - _USEAWS=$BS_FALSE - pkg_append="python" - - if [ "$ITYPE" = "stable" ]; then - repo_rev="$STABLE_REV" - else - repo_rev="latest" - fi - - if echo $repo_rev | grep -E -q '^archive'; then - year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) - else - year=$(echo "$repo_rev" | cut -c1-4) - fi - - if echo "$repo_rev" | grep -E -q '^(latest|2016\.11)$' || \ - [ "$year" -gt 2016 ]; then - _USEAWS=$BS_TRUE - pkg_append="python" - fi - - # We need to install yum-utils before doing anything else when installing on - # Amazon Linux ECS-optimized images. See issue #974. - __yum_install_noinput yum-utils - - # Do upgrade early - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - yum -y update || return 1 - fi - - if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then - __REPO_FILENAME="salt.repo" - __PY_VERSION_REPO="yum" - PY_PKG_VER="" - repo_label="saltstack-repo" - repo_name="SaltStack repo for Amazon Linux 2" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __REPO_FILENAME="salt.repo" - __PY_VERSION_REPO="py3" - PY_PKG_VER=3 - repo_label="saltstack-py3-repo" - repo_name="SaltStack Python 3 repo for Amazon Linux 2" - fi - - base_url="$HTTP_VAL://${_REPO_URL}/${__PY_VERSION_REPO}/amazon/2/\$basearch/$repo_rev/" - gpg_key="${base_url}SALTSTACK-GPG-KEY.pub,${base_url}base/RPM-GPG-KEY-CentOS-7" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" - fi - - # This should prob be refactored to use __install_saltstack_rhel_repository() - # With args passed in to do the right thing. Reformatted to be more like the - # amazon linux yum file. - if [ ! 
-s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then - cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" -[$repo_label] -name=$repo_name -failovermethod=priority -priority=10 -gpgcheck=1 -gpgkey=$gpg_key -baseurl=$base_url -_eof - fi - - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 - # which is already installed - if [ -n "${PY_PKG_VER}" ] && [ "${PY_PKG_VER}" -eq 3 ]; then - __PACKAGES="${pkg_append}${PY_PKG_VER}-m2crypto ${pkg_append}${PY_PKG_VER}-pyyaml" - else - __PACKAGES="m2crypto PyYAML ${pkg_append}-futures" - fi - - __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-crypto ${pkg_append}${PY_PKG_VER}-jinja2 procps-ng" - __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-msgpack ${pkg_append}${PY_PKG_VER}-requests ${pkg_append}${PY_PKG_VER}-zmq" - - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi -} - -install_amazon_linux_ami_stable() { - install_centos_stable || return 1 - return 0 -} - -install_amazon_linux_ami_stable_post() { - install_centos_stable_post || return 1 - return 0 -} - -install_amazon_linux_ami_restart_daemons() { - install_centos_restart_daemons || return 1 - return 0 -} - -install_amazon_linux_ami_git() { - install_centos_git || return 1 - return 0 -} - -install_amazon_linux_ami_git_post() { - install_centos_git_post || return 1 - return 0 -} - -install_amazon_linux_ami_testing() { - install_centos_testing || return 1 - return 0 -} - -install_amazon_linux_ami_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_amazon_linux_ami_2_stable() { - install_centos_stable || return 1 - return 0 -} - -install_amazon_linux_ami_2_stable_post() { - 
install_centos_stable_post || return 1 - return 0 -} - -install_amazon_linux_ami_2_restart_daemons() { - install_centos_restart_daemons || return 1 - return 0 -} - -install_amazon_linux_ami_2_git() { - install_centos_git || return 1 - return 0 -} - -install_amazon_linux_ami_2_git_post() { - install_centos_git_post || return 1 - return 0 -} - -install_amazon_linux_ami_2_testing() { - install_centos_testing || return 1 - return 0 -} - -install_amazon_linux_ami_2_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_amazon_linux_ami_2_check_services() { - install_centos_check_services || return 1 - return 0 -} - -# -# Ended Amazon Linux AMI Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# Arch Install Functions -# -install_arch_linux_stable_deps() { - if [ ! 
-f /etc/pacman.d/gnupg ]; then - pacman-key --init && pacman-key --populate archlinux || return 1 - fi - - # Pacman does not resolve dependencies on outdated versions - # They always need to be updated - pacman -Syy --noconfirm - - pacman -S --noconfirm --needed archlinux-keyring || return 1 - - pacman -Su --noconfirm --needed pacman || return 1 - - if __check_command_exists pacman-db-upgrade; then - pacman-db-upgrade || return 1 - fi - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - PY_PKG_VER=2 - else - PY_PKG_VER="" - fi - - # YAML module is used for generating custom master/minion configs - # shellcheck disable=SC2086 - pacman -Su --noconfirm --needed python${PY_PKG_VER}-yaml - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - # shellcheck disable=SC2086 - pacman -Su --noconfirm --needed python${PY_PKG_VER}-apache-libcloud || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - pacman -Su --noconfirm --needed ${_EXTRA_PACKAGES} || return 1 - fi -} - -install_arch_linux_git_deps() { - install_arch_linux_stable_deps - - # Don't fail if un-installing python2-distribute threw an error - if ! 
__check_command_exists git; then - pacman -Sy --noconfirm --needed git || return 1 - fi - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - pacman -R --noconfirm python2-distribute - pacman -Su --noconfirm --needed python2-crypto python2-setuptools python2-jinja \ - python2-m2crypto python2-markupsafe python2-msgpack python2-psutil \ - python2-pyzmq zeromq python2-requests python2-systemd || return 1 - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the master branch, install whichever tornado is on the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - pacman -Su --noconfirm --needed python2-tornado - fi - fi - else - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - PY_PKG_VER=2 - else - PY_PKG_VER="" - fi - __PACKAGES="python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" - # shellcheck disable=SC2086 - pacman -Su --noconfirm --needed ${__PACKAGES} - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_arch_linux_stable() { - # Pacman does not resolve dependencies on outdated versions - # They always need to be updated - pacman -Syy --noconfirm - - pacman -Su --noconfirm --needed pacman || return 1 - # See https://mailman.archlinux.org/pipermail/arch-dev-public/2013-June/025043.html - # to know why we're ignoring below. 
- pacman -Syu --noconfirm --ignore filesystem,bash || return 1 - pacman -S --noconfirm --needed bash || return 1 - pacman -Su --noconfirm || return 1 - # We can now resume regular salt update - pacman -Syu --noconfirm salt || return 1 - return 0 -} - -install_arch_linux_git() { - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - python2 setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 - else - python2 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 - fi - return 0 -} - -install_arch_linux_post() { - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - # Since Arch's pacman renames configuration files - if [ "$_TEMP_CONFIG_DIR" != "null" ] && [ -f "$_SALT_ETC_DIR/$fname.pacorig" ]; then - # Since a configuration directory was provided, it also means that any - # configuration file copied was renamed by Arch, see: - # https://wiki.archlinux.org/index.php/Pacnew_and_Pacsave_Files#.pacorig - __copyfile "$_SALT_ETC_DIR/$fname.pacorig" "$_SALT_ETC_DIR/$fname" $BS_TRUE - fi - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - if [ -f /usr/bin/systemctl ]; then - # Using systemd - /usr/bin/systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( - /usr/bin/systemctl preset salt-$fname.service > /dev/null 2>&1 && - /usr/bin/systemctl enable salt-$fname.service > /dev/null 2>&1 - ) - sleep 1 - 
/usr/bin/systemctl daemon-reload - continue - fi - - # XXX: How do we enable old Arch init.d scripts? - done -} - -install_arch_linux_git_post() { - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /usr/bin/systemctl ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - /usr/bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( - /usr/bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && - /usr/bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 - ) - sleep 1 - /usr/bin/systemctl daemon-reload - continue - fi - - # SysV init!? 
- __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname" "/etc/rc.d/init.d/salt-$fname" - chmod +x /etc/rc.d/init.d/salt-$fname - done -} - -install_arch_linux_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /usr/bin/systemctl ]; then - /usr/bin/systemctl stop salt-$fname.service > /dev/null 2>&1 - /usr/bin/systemctl start salt-$fname.service && continue - echodebug "Failed to start salt-$fname using systemd" - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - systemctl status salt-$fname.service - journalctl -xe - fi - fi - - /etc/rc.d/salt-$fname stop > /dev/null 2>&1 - /etc/rc.d/salt-$fname start - done -} - -install_arch_check_services() { - if [ ! -f /usr/bin/systemctl ]; then - # Not running systemd!? Don't check! 
- return 0 - fi - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - __check_services_systemd salt-$fname || return 1 - done - - return 0 -} -# -# Ended Arch Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# FreeBSD Install Functions -# - -# Using a separate conf step to head for idempotent install... -__configure_freebsd_pkg_details() { - _SALT_ETC_DIR="/usr/local/etc/salt" - _PKI_DIR=${_SALT_ETC_DIR}/pki - _POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr/local" -} - -install_freebsd_deps() { - __configure_freebsd_pkg_details - pkg install -y pkg -} - -install_freebsd_git_deps() { - install_freebsd_deps || return 1 - - if ! 
__check_command_exists git; then - /usr/local/sbin/pkg install -y git || return 1 - fi - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - - SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py38-salt) - # shellcheck disable=SC2086 - /usr/local/sbin/pkg install -y ${SALT_DEPENDENCIES} python || return 1 - - /usr/local/sbin/pkg install -y py38-requests || return 1 - /usr/local/sbin/pkg install -y py38-tornado4 || return 1 - - else - /usr/local/sbin/pkg install -y python py38-pip py38-setuptools libzmq4 libunwind || return 1 - fi - - echodebug "Adapting paths to FreeBSD" - # The list of files was taken from Salt's BSD port Makefile - for file in doc/man/salt-key.1 doc/man/salt-cp.1 doc/man/salt-minion.1 \ - doc/man/salt-syndic.1 doc/man/salt-master.1 doc/man/salt-run.1 \ - doc/man/salt.7 doc/man/salt.1 doc/man/salt-call.1; do - [ ! -f $file ] && continue - echodebug "Patching ${file}" - sed -in -e "s|/etc/salt|${_SALT_ETC_DIR}|" \ - -e "s|/srv/salt|${_SALT_ETC_DIR}/states|" \ - -e "s|/srv/pillar|${_SALT_ETC_DIR}/pillar|" ${file} - done - if [ ! -f salt/syspaths.py ]; then - # We still can't provide the system paths, salt 0.16.x - # Let's patch salt's source and adapt paths to what's expected on FreeBSD - echodebug "Replacing occurrences of '/etc/salt' with ${_SALT_ETC_DIR}" - # The list of files was taken from Salt's BSD port Makefile - for file in conf/minion conf/master salt/config.py salt/client.py \ - salt/modules/mysql.py salt/utils/parsers.py salt/modules/tls.py \ - salt/modules/postgres.py salt/utils/migrations.py; do - [ ! 
-f $file ] && continue - echodebug "Patching ${file}" - sed -in -e "s|/etc/salt|${_SALT_ETC_DIR}|" \ - -e "s|/srv/salt|${_SALT_ETC_DIR}/states|" \ - -e "s|/srv/pillar|${_SALT_ETC_DIR}/pillar|" ${file} - done - fi - echodebug "Finished patching" - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - - fi - - return 0 -} - -install_freebsd_stable() { -# -# installing latest version of salt from FreeBSD CURRENT ports repo -# - # shellcheck disable=SC2086 - /usr/local/sbin/pkg install -y py38-salt || return 1 - - return 0 -} - -install_freebsd_git() { - - # /usr/local/bin/python3 in FreeBSD is a symlink to /usr/local/bin/python3.7 - __PYTHON_PATH=$(readlink -f "$(command -v python3)") - __ESCAPED_PYTHON_PATH=$(echo "${__PYTHON_PATH}" | sed 's/\//\\\//g') - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${__PYTHON_PATH}" || return 1 - for script in salt_api salt_master salt_minion salt_proxy salt_syndic; do - __fetch_url "/usr/local/etc/rc.d/${script}" "https://raw.githubusercontent.com/freebsd/freebsd-ports/master/sysutils/py-salt/files/${script}.in" || return 1 - sed -i '' 's/%%PREFIX%%/\/usr\/local/g' /usr/local/etc/rc.d/${script} - sed -i '' "s/%%PYTHON_CMD%%/${__ESCAPED_PYTHON_PATH}/g" /usr/local/etc/rc.d/${script} - chmod +x /usr/local/etc/rc.d/${script} || return 1 - done - - return 0 - fi - - # Install from git - if [ ! 
-f salt/syspaths.py ]; then - # We still can't provide the system paths, salt 0.16.x - ${__PYTHON_PATH} setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 - else - ${__PYTHON_PATH} setup.py \ - --salt-root-dir=/ \ - --salt-config-dir="${_SALT_ETC_DIR}" \ - --salt-cache-dir="${_SALT_CACHE_DIR}" \ - --salt-sock-dir=/var/run/salt \ - --salt-srv-root-dir="${_SALT_ETC_DIR}" \ - --salt-base-file-roots-dir="${_SALT_ETC_DIR}/states" \ - --salt-base-pillar-roots-dir="${_SALT_ETC_DIR}/pillar" \ - --salt-base-master-roots-dir="${_SALT_ETC_DIR}/salt-master" \ - --salt-logs-dir=/var/log/salt \ - --salt-pidfile-dir=/var/run \ - ${SETUP_PY_INSTALL_ARGS} install \ - || return 1 - fi - - for script in salt_api salt_master salt_minion salt_proxy salt_syndic; do - __fetch_url "/usr/local/etc/rc.d/${script}" "https://raw.githubusercontent.com/freebsd/freebsd-ports/master/sysutils/py-salt/files/${script}.in" || return 1 - sed -i '' 's/%%PREFIX%%/\/usr\/local/g' /usr/local/etc/rc.d/${script} - sed -i '' "s/%%PYTHON_CMD%%/${__ESCAPED_PYTHON_PATH}/g" /usr/local/etc/rc.d/${script} - chmod +x /usr/local/etc/rc.d/${script} || return 1 - done - - # And we're good to go - return 0 -} - -install_freebsd_stable_post() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - enable_string="salt_${fname}_enable=YES" - grep "$enable_string" /etc/rc.conf >/dev/null 2>&1 - [ $? 
-eq 1 ] && sysrc $enable_string - - done -} - -install_freebsd_git_post() { - install_freebsd_stable_post || return 1 - return 0 -} - -install_freebsd_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - service salt_$fname stop > /dev/null 2>&1 - service salt_$fname start - done -} -# -# Ended FreeBSD Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# OpenBSD Install Functions -# - -install_openbsd_deps() { - if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then - OPENBSD_REPO='https://cdn.openbsd.org/pub/OpenBSD' - echoinfo "setting package repository to $OPENBSD_REPO" - echo "${OPENBSD_REPO}" >/etc/installurl || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - pkg_add -I -v ${_EXTRA_PACKAGES} || return 1 - fi - return 0 -} - -install_openbsd_git_deps() { - install_openbsd_deps || return 1 - - if ! 
__check_command_exists git; then - pkg_add -I -v git || return 1 - fi - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - pkg_add -I -v py-pip py-setuptools - fi - - # - # Let's trigger config_salt() - # - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_openbsd_git() { - # - # Install from git - # - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - if [ ! -f salt/syspaths.py ]; then - # We still can't provide the system paths, salt 0.16.x - /usr/local/bin/python2.7 setup.py ${SETUP_PY_INSTALL_ARGS} install || return 1 - fi - return 0 -} - -install_openbsd_stable() { - pkg_add -r -I -v salt || return 1 - return 0 -} - -install_openbsd_post() { - for fname in api master minion syndic; do - [ $fname = "api" ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - rcctl enable salt_$fname - done - - return 0 -} - -install_openbsd_check_services() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && continue - - if [ -f /etc/rc.d/salt_${fname} ]; then - __check_services_openbsd salt_${fname} || return 1 - fi - done - - return 0 -} - -install_openbsd_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not 
necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - rcctl restart salt_${fname} - done - - return 0 -} - -# -# Ended OpenBSD Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# SmartOS Install Functions -# -install_smartos_deps() { - smartos_deps="$(pkgin show-deps salt | grep '^\s' | grep -v '\snot' | xargs) py27-m2crypto" - pkgin -y install "${smartos_deps}" || return 1 - - # Set _SALT_ETC_DIR to SmartOS default if they didn't specify - _SALT_ETC_DIR=${BS_SALT_ETC_DIR:-/opt/local/etc/salt} - # We also need to redefine the PKI directory - _PKI_DIR=${_SALT_ETC_DIR}/pki - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - # Let's set the configuration directory to /tmp - _TEMP_CONFIG_DIR="/tmp" - CONFIG_SALT_FUNC="config_salt" - - # Let's download, since they were not provided, the default configuration files - if [ ! -f "$_SALT_ETC_DIR/minion" ] && [ ! -f "$_TEMP_CONFIG_DIR/minion" ]; then - # shellcheck disable=SC2086 - curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/minion" -L \ - https://raw.githubusercontent.com/saltstack/salt/master/conf/minion || return 1 - fi - if [ ! -f "$_SALT_ETC_DIR/master" ] && [ ! 
-f $_TEMP_CONFIG_DIR/master ]; then - # shellcheck disable=SC2086 - curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/master" -L \ - https://raw.githubusercontent.com/saltstack/salt/master/conf/master || return 1 - fi - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - pkgin -y install py27-apache-libcloud || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - pkgin -y install ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - -install_smartos_git_deps() { - install_smartos_deps || return 1 - - if ! __check_command_exists git; then - pkgin -y install git || return 1 - fi - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # Install whichever tornado is in the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - __check_pip_allowed "You need to allow pip based installations (-P) in order to install the python package '${__REQUIRED_TORNADO}'" - - # Install whichever futures is in the requirements file - __REQUIRED_FUTURES="$(grep futures "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - __check_pip_allowed "You need to allow pip based installations (-P) in order to install the python package '${__REQUIRED_FUTURES}'" - - if [ "${__REQUIRED_TORNADO}" != "" ]; then - if ! __check_command_exists pip; then - pkgin -y install py27-pip - fi - pip install -U "${__REQUIRED_TORNADO}" - fi - - if [ "${__REQUIRED_FUTURES}" != "" ]; then - if ! __check_command_exists pip; then - pkgin -y install py27-pip - fi - pip install -U "${__REQUIRED_FUTURES}" - fi - fi - else - if ! 
__check_command_exists pip; then - pkgin -y install py27-pip - fi - pkgin -y install py27-setuptools - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_smartos_stable() { - pkgin -y install salt || return 1 - return 0 -} - -install_smartos_git() { - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - # Use setuptools in order to also install dependencies - # lets force our config path on the setup for now, since salt/syspaths.py only got fixed in 2015.5.0 - USE_SETUPTOOLS=1 /opt/local/bin/python setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install || return 1 - return 0 -} - -install_smartos_post() { - smf_dir="/opt/custom/smf" - - # Install manifest files if needed. - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - svcs network/salt-$fname > /dev/null 2>&1 - if [ $? -eq 1 ]; then - if [ ! -f "$_TEMP_CONFIG_DIR/salt-$fname.xml" ]; then - # shellcheck disable=SC2086 - curl $_CURL_ARGS -s -o "$_TEMP_CONFIG_DIR/salt-$fname.xml" -L \ - "https://raw.githubusercontent.com/saltstack/salt/master/pkg/smartos/salt-$fname.xml" - fi - svccfg import "$_TEMP_CONFIG_DIR/salt-$fname.xml" - if [ "${VIRTUAL_TYPE}" = "global" ]; then - if [ ! -d "$smf_dir" ]; then - mkdir -p "$smf_dir" || return 1 - fi - if [ ! 
-f "$smf_dir/salt-$fname.xml" ]; then - __copyfile "$_TEMP_CONFIG_DIR/salt-$fname.xml" "$smf_dir/" || return 1 - fi - fi - fi - done - - return 0 -} - -install_smartos_git_post() { - smf_dir="/opt/custom/smf" - - # Install manifest files if needed. - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - svcs "network/salt-$fname" > /dev/null 2>&1 - if [ $? -eq 1 ]; then - svccfg import "${_SALT_GIT_CHECKOUT_DIR}/pkg/smartos/salt-$fname.xml" - if [ "${VIRTUAL_TYPE}" = "global" ]; then - if [ ! -d $smf_dir ]; then - mkdir -p "$smf_dir" - fi - if [ ! -f "$smf_dir/salt-$fname.xml" ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/smartos/salt-$fname.xml" "$smf_dir/" - fi - fi - fi - done - - return 0 -} - -install_smartos_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - # Stop if running && Start service - svcadm disable salt-$fname > /dev/null 2>&1 - svcadm enable salt-$fname - done - - return 0 -} -# -# Ended SmartOS Install Functions -# -####################################################################################################################### - 
-####################################################################################################################### -# -# openSUSE Install Functions. -# -__ZYPPER_REQUIRES_REPLACE_FILES=-1 - -__set_suse_pkg_repo() { - - # Set distro repo variable - if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then - DISTRO_REPO="openSUSE_Tumbleweed" - elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ] || [ "${DISTRO_MAJOR_VERSION}" -eq 15 ]; then - DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" - else - DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}_SP${SUSE_PATCHLEVEL}" - fi - - if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then - suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" - suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" - else - suse_pkg_url_base="${HTTP_VAL}://repo.saltproject.io/opensuse" - suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack:products.repo" - fi - SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path" -} - -__check_and_refresh_suse_pkg_repo() { - # Check to see if systemsmanagement_saltstack exists - __zypper repos | grep -q systemsmanagement_saltstack - - if [ $? -eq 1 ]; then - # zypper does not yet know anything about systemsmanagement_saltstack - __zypper addrepo --refresh "${SUSE_PKG_URL}" || return 1 - fi -} - -__version_lte() { - if ! __check_command_exists python; then - zypper --non-interactive install --replacefiles --auto-agree-with-licenses python || \ - zypper --non-interactive install --auto-agree-with-licenses python || return 1 - fi - - if [ "$(python -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print V1<=V2' "$1" "$2")" = "True" ]; then - __ZYPPER_REQUIRES_REPLACE_FILES=${BS_TRUE} - else - __ZYPPER_REQUIRES_REPLACE_FILES=${BS_FALSE} - fi -} - -__zypper() { - # Check if any zypper process is running before calling zypper again. 
- # This is useful when a zypper call is part of a boot process and will - # wait until the zypper process is finished, such as on AWS AMIs. - while pgrep -l zypper; do - sleep 1 - done - - zypper --non-interactive "${@}" - # Return codes between 100 and 104 are only informations, not errors - # https://en.opensuse.org/SDB:Zypper_manual#EXIT_CODES - if [ "$?" -gt "99" ] && [ "$?" -le "104" ]; then - return 0 - fi - return $? -} - -__zypper_install() { - if [ "${__ZYPPER_REQUIRES_REPLACE_FILES}" = "-1" ]; then - __version_lte "1.10.4" "$(zypper --version | awk '{ print $2 }')" - fi - if [ "${__ZYPPER_REQUIRES_REPLACE_FILES}" = "${BS_TRUE}" ]; then - # In case of file conflicts replace old files. - # Option present in zypper 1.10.4 and newer: - # https://github.com/openSUSE/zypper/blob/95655728d26d6d5aef7796b675f4cc69bc0c05c0/package/zypper.changes#L253 - __zypper install --auto-agree-with-licenses --replacefiles "${@}"; return $? - else - __zypper install --auto-agree-with-licenses "${@}"; return $? - fi -} - -__opensuse_prep_install() { - # DRY function for common installation preparatory steps for SUSE - if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then - # Is the repository already known - __set_suse_pkg_repo - # Check zypper repos and refresh if necessary - __check_and_refresh_suse_pkg_repo - fi - - __zypper --gpg-auto-import-keys refresh - - # shellcheck disable=SC2181 - if [ $? -ne 0 ] && [ $? -ne 4 ]; then - # If the exit code is not 0, and it's not 4 (failed to update a - # repository) return a failure. Otherwise continue. 
- return 1 - fi - - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - __zypper --gpg-auto-import-keys update || return 1 - fi -} - -install_opensuse_stable_deps() { - __opensuse_prep_install || return 1 - - if [ "$DISTRO_MAJOR_VERSION" -eq 12 ] && [ "$DISTRO_MINOR_VERSION" -eq 3 ]; then - # Because patterns-openSUSE-minimal_base-conflicts conflicts with python, lets remove the first one - __zypper remove patterns-openSUSE-minimal_base-conflicts - fi - - # YAML module is used for generating custom master/minion configs - # requests is still used by many salt modules - # Salt needs python-zypp installed in order to use the zypper module - __PACKAGES="python-PyYAML python-requests python-zypp" - - # shellcheck disable=SC2086 - __zypper_install ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __zypper_install ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - -install_opensuse_git_deps() { - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ] && ! __check_command_exists update-ca-certificates; then - __zypper_install ca-certificates || return 1 - fi - - install_opensuse_stable_deps || return 1 - - if ! 
__check_command_exists git; then - __zypper_install git || return 1 - fi - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - __zypper_install patch || return 1 - - __PACKAGES="libzmq5 python-Jinja2 python-m2crypto python-msgpack-python python-pycrypto python-pyzmq python-xml python-futures" - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the master branch, install whichever tornado is on the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - __PACKAGES="${__PACKAGES} python-tornado" - fi - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python-apache-libcloud" - fi - # Check for Tumbleweed - elif [ "${DISTRO_MAJOR_VERSION}" -ge 20210101 ]; then - __PACKAGES="python3-pip" - else - __PACKAGES="python-pip python-setuptools gcc" - fi - - # shellcheck disable=SC2086 - __zypper_install ${__PACKAGES} || return 1 - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_opensuse_stable() { - __PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-syndic" - fi - - # shellcheck disable=SC2086 - __zypper_install $__PACKAGES || return 1 - - return 0 -} - -install_opensuse_git() { - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - python setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 - return 0 -} - 
-install_opensuse_stable_post() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ] || [ -f /usr/bin/systemctl ]; then - systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) - sleep 1 - systemctl daemon-reload - continue - fi - - /sbin/chkconfig --add salt-$fname - /sbin/chkconfig salt-$fname on - done - - return 0 -} - -install_opensuse_git_post() { - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if command -v systemctl; then - use_usr_lib=$BS_FALSE - - if [ "${DISTRO_MAJOR_VERSION}" -ge 15 ]; then - use_usr_lib=$BS_TRUE - fi - - if [ "${DISTRO_MAJOR_VERSION}" -eq 12 ] && [ -d "/usr/lib/systemd/" ]; then - use_usr_lib=$BS_TRUE - fi - - if [ "${use_usr_lib}" -eq $BS_TRUE ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" - else - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" - fi - - continue - fi - - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-$fname" "/etc/init.d/salt-$fname" - chmod +x /etc/init.d/salt-$fname - done - - install_opensuse_stable_post || return 1 - - 
return 0 -} - -install_opensuse_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ]; then - systemctl stop salt-$fname > /dev/null 2>&1 - systemctl start salt-$fname.service && continue - echodebug "Failed to start salt-$fname using systemd" - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - systemctl status salt-$fname.service - journalctl -xe - fi - fi - - service salt-$fname stop > /dev/null 2>&1 - service salt-$fname start - done -} - -install_opensuse_check_services() { - if [ ! -f /bin/systemctl ]; then - # Not running systemd!? Don't check! - return 0 - fi - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - __check_services_systemd salt-$fname > /dev/null 2>&1 || __check_services_systemd salt-$fname.service > /dev/null 2>&1 || return 1 - done - - return 0 -} -# -# End of openSUSE Install Functions. 
-# -####################################################################################################################### - -####################################################################################################################### -# -# openSUSE Leap 15 -# - -install_opensuse_15_stable_deps() { - __opensuse_prep_install || return 1 - - # SUSE only packages Salt for Python 3 on Leap 15 - # Py3 is the default bootstrap install for Leap 15 - # However, git installs might specify "-x python2" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - PY_PKG_VER=2 - else - PY_PKG_VER=3 - fi - - # YAML module is used for generating custom master/minion configs - # requests is still used by many salt modules - __PACKAGES="python${PY_PKG_VER}-PyYAML python${PY_PKG_VER}-requests" - - # shellcheck disable=SC2086 - __zypper_install ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __zypper_install ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - -install_opensuse_15_git_deps() { - install_opensuse_15_stable_deps || return 1 - - if ! 
__check_command_exists git; then - __zypper_install git || return 1 - fi - - __git_clone_and_checkout || return 1 - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - PY_PKG_VER=2 - else - PY_PKG_VER=3 - fi - - __PACKAGES="python${PY_PKG_VER}-xml" - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - - # Py3 is the default bootstrap install for Leap 15 - # However, git installs might specify "-x python2" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - # This is required by some of the python2 packages below - __PACKAGES="${__PACKAGES} libpython2_7-1_0 python2-futures python-ipaddress" - fi - - __PACKAGES="${__PACKAGES} libzmq5 python${PY_PKG_VER}-Jinja2 python${PY_PKG_VER}-msgpack" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pycrypto python${PY_PKG_VER}-pyzmq" - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the master branch, install whichever tornado is on the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-tornado" - fi - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-apache-libcloud" - fi - else - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" - fi - - # shellcheck disable=SC2086 - __zypper_install ${__PACKAGES} || return 1 - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_opensuse_15_git() { - - # Py3 is the default bootstrap install for Leap 15 - if [ -n "$_PY_EXE" ]; then - _PYEXE=${_PY_EXE} - else - _PYEXE=python3 - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - ${_PYEXE} setup.py 
${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 - return 0 -} - -# -# End of openSUSE Leap 15 -# -####################################################################################################################### - -####################################################################################################################### -# -# SUSE Enterprise 15 -# - -install_suse_15_stable_deps() { - __opensuse_prep_install || return 1 - install_opensuse_15_stable_deps || return 1 - - return 0 -} - -install_suse_15_git_deps() { - install_suse_15_stable_deps || return 1 - - if ! __check_command_exists git; then - __zypper_install git-core || return 1 - fi - - install_opensuse_15_git_deps || return 1 - - return 0 -} - -install_suse_15_stable() { - install_opensuse_stable || return 1 - return 0 -} - -install_suse_15_git() { - install_opensuse_15_git || return 1 - return 0 -} - -install_suse_15_stable_post() { - install_opensuse_stable_post || return 1 - return 0 -} - -install_suse_15_git_post() { - install_opensuse_git_post || return 1 - return 0 -} - -install_suse_15_restart_daemons() { - install_opensuse_restart_daemons || return 1 - return 0 -} - -# -# End of SUSE Enterprise 15 -# -####################################################################################################################### - -####################################################################################################################### -# -# SUSE Enterprise 12 -# - -install_suse_12_stable_deps() { - __opensuse_prep_install || return 1 - - # YAML module is used for generating custom master/minion configs - # requests is still used by many salt modules - # Salt needs python-zypp installed in order to use the zypper module - __PACKAGES="python-PyYAML python-requests python-zypp" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python-apache-libcloud" - fi - - # shellcheck disable=SC2086,SC2090 - __zypper_install ${__PACKAGES} || return 1 - - # 
SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which - # we want to install, even with --non-interactive. - # Let's try to install the higher version first and then the lower one in case of failure - __zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __zypper_install ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - -install_suse_12_git_deps() { - install_suse_12_stable_deps || return 1 - - if ! __check_command_exists git; then - __zypper_install git-core || return 1 - fi - - __git_clone_and_checkout || return 1 - - __PACKAGES="" - # shellcheck disable=SC2089 - __PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto" - __PACKAGES="${__PACKAGES} python-pyzmq python-xml" - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the master branch, install whichever tornado is on the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - __PACKAGES="${__PACKAGES} python-tornado" - fi - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python-apache-libcloud" - fi - - # shellcheck disable=SC2086 - __zypper_install ${__PACKAGES} || return 1 - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_suse_12_stable() { - install_opensuse_stable || return 1 - return 0 -} - -install_suse_12_git() { - install_opensuse_git || return 1 - return 0 -} - -install_suse_12_stable_post() { - install_opensuse_stable_post || return 1 - return 0 -} - -install_suse_12_git_post() { - install_opensuse_git_post || 
return 1 - return 0 -} - -install_suse_12_restart_daemons() { - install_opensuse_restart_daemons || return 1 - return 0 -} - -# -# End of SUSE Enterprise 12 -# -####################################################################################################################### - -####################################################################################################################### -# -# SUSE Enterprise 11 -# - -install_suse_11_stable_deps() { - __opensuse_prep_install || return 1 - - # YAML module is used for generating custom master/minion configs - __PACKAGES="python-PyYAML" - - # shellcheck disable=SC2086,SC2090 - __zypper_install ${__PACKAGES} || return 1 - - # SLES 11 SP3 ships with both python-M2Crypto-0.22.* and python-m2crypto-0.21 and we will be asked which - # we want to install, even with --non-interactive. - # Let's try to install the higher version first and then the lower one in case of failure - __zypper_install 'python-M2Crypto>=0.22' || __zypper_install 'python-M2Crypto>=0.21' || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __zypper_install ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - -install_suse_11_git_deps() { - install_suse_11_stable_deps || return 1 - - if ! 
__check_command_exists git; then - __zypper_install git || return 1 - fi - - __git_clone_and_checkout || return 1 - - __PACKAGES="" - # shellcheck disable=SC2089 - __PACKAGES="${__PACKAGES} libzmq4 python-Jinja2 python-msgpack-python python-pycrypto" - __PACKAGES="${__PACKAGES} python-pyzmq python-xml python-zypp" - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" ]; then - # We're on the master branch, install whichever tornado is on the requirements file - __REQUIRED_TORNADO="$(grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt")" - if [ "${__REQUIRED_TORNADO}" != "" ]; then - __PACKAGES="${__PACKAGES} python-tornado" - fi - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python-apache-libcloud" - fi - - # shellcheck disable=SC2086 - __zypper_install ${__PACKAGES} || return 1 - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - -install_suse_11_stable() { - install_opensuse_stable || return 1 - return 0 -} - -install_suse_11_git() { - install_opensuse_git || return 1 - return 0 -} - -install_suse_11_stable_post() { - install_opensuse_stable_post || return 1 - return 0 -} - -install_suse_11_git_post() { - install_opensuse_git_post || return 1 - return 0 -} - -install_suse_11_restart_daemons() { - install_opensuse_restart_daemons || return 1 - return 0 -} - - -# -# End of SUSE Enterprise 11 -# -####################################################################################################################### - -####################################################################################################################### -# -# SUSE Enterprise General Functions -# - -# Used for both SLE 11 and 12 -install_suse_check_services() { - if [ ! -f /bin/systemctl ]; then - # Not running systemd!? Don't check! 
- return 0 - fi - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - __check_services_systemd salt-$fname || return 1 - done - - return 0 -} - -# -# End of SUSE Enterprise General Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# Gentoo Install Functions. -# -__autounmask() { - # Unmask package(s) and accept changes - # - # Usually it's a good thing to have config files protected by portage, but - # in this case this would require to interrupt the bootstrapping script at - # this point, manually merge the changes using etc-update/dispatch-conf/ - # cfg-update and then restart the bootstrapping script, so instead we allow - # at this point to modify certain config files directly - export CONFIG_PROTECT_MASK="${CONFIG_PROTECT_MASK:-} - /etc/portage/package.accept_keywords - /etc/portage/package.keywords - /etc/portage/package.license - /etc/portage/package.unmask - /etc/portage/package.use" - emerge --autounmask --autounmask-continue --autounmask-only --autounmask-write "${@}"; return $? -} - -__emerge() { - EMERGE_FLAGS='-q' - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - EMERGE_FLAGS='-v' - fi - - # Do not re-emerge packages that are already installed - EMERGE_FLAGS="${EMERGE_FLAGS} --noreplace" - - if [ "$_GENTOO_USE_BINHOST" -eq $BS_TRUE ]; then - EMERGE_FLAGS="${EMERGE_FLAGS} --getbinpkg" - fi - - # shellcheck disable=SC2086 - emerge ${EMERGE_FLAGS} "${@}"; return $? 
-} - -__gentoo_pre_dep() { - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - if __check_command_exists eix; then - eix-sync - else - emerge --sync - fi - else - if __check_command_exists eix; then - eix-sync -q - else - emerge --sync --quiet - fi - fi - if [ ! -d /etc/portage ]; then - mkdir /etc/portage - fi - - # Enable Python 3.6 target for pre Neon Salt release - if echo "${STABLE_REV}" | grep -q "2019" || [ "${ITYPE}" = "git" ] && [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - EXTRA_PYTHON_TARGET=python3_6 - fi - - # Enable Python 3.7 target for Salt Neon using GIT - if [ "${ITYPE}" = "git" ] && [ "${GIT_REV}" = "v3000" ]; then - EXTRA_PYTHON_TARGET=python3_7 - fi - - if [ -n "${EXTRA_PYTHON_TARGET:-}" ]; then - if ! emerge --info | sed 's/.*\(PYTHON_TARGETS="[^"]*"\).*/\1/' | grep -q "${EXTRA_PYTHON_TARGET}" ; then - echo "PYTHON_TARGETS=\"\${PYTHON_TARGETS} ${EXTRA_PYTHON_TARGET}\"" >> /etc/portage/make.conf - emerge --deep --with-bdeps=y --newuse --quiet @world - fi - fi -} - -__gentoo_post_dep() { - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __autounmask ${_EXTRA_PACKAGES} || return 1 - # shellcheck disable=SC2086 - __emerge ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - -install_gentoo_deps() { - __gentoo_pre_dep || return 1 - - # Make sure that the 'libcloud' use flag is set when Salt Cloud support is requested - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - SALT_USE_FILE='/etc/portage/package.use' - if [ -d '/etc/portage/package.use' ]; then - SALT_USE_FILE='/etc/portage/package.use/salt' - fi - - SALT_USE_FLAGS="$(grep -E '^[<>=~]*app-admin/salt.*' ${SALT_USE_FILE} 2>/dev/null)" - SALT_USE_FLAG_LIBCLOUD="$(echo "${SALT_USE_FLAGS}" | grep ' libcloud' 2>/dev/null)" - - # Set the libcloud use flag, if it is not set yet - if [ -z "${SALT_USE_FLAGS}" ]; then - echo "app-admin/salt libcloud" >> ${SALT_USE_FILE} - elif [ -z 
"${SALT_USE_FLAG_LIBCLOUD}" ]; then - sed 's#^\([<>=~]*app-admin/salt[^ ]*\)\(.*\)#\1 libcloud\2#g' -i ${SALT_USE_FILE} - fi - fi - - __gentoo_post_dep || return 1 -} - -install_gentoo_git_deps() { - __gentoo_pre_dep || return 1 - - # Install pip if it does not exist - if ! __check_command_exists pip ; then - GENTOO_GIT_PACKAGES="${GENTOO_GIT_PACKAGES:-} dev-python/pip" - fi - - # Install GIT if it does not exist - if ! __check_command_exists git ; then - GENTOO_GIT_PACKAGES="${GENTOO_GIT_PACKAGES:-} dev-vcs/git" - fi - - # Salt <3000 does not automatically install dependencies. It has to be done manually. - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - GENTOO_GIT_PACKAGES="${GENTOO_GIT_PACKAGES:-} - sys-apps/pciutils - dev-python/pyyaml - dev-python/pyzmq - dev-python/libnacl - dev-python/pycryptodome - dev-python/py - dev-python/requests - /dev/null 2>&1 || ( - systemctl preset salt-$fname.service > /dev/null 2>&1 && - systemctl enable salt-$fname.service > /dev/null 2>&1 - ) - else - # Salt minion cannot start in a docker container because the "net" service is not available - if [ $fname = "minion" ] && [ -f /.dockerenv ]; then - sed '/need net/d' -i /etc/init.d/salt-$fname - fi - - rc-update add "salt-$fname" > /dev/null 2>&1 || return 1 - fi - done -} - -install_gentoo_git_post() { - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! 
__check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if __check_command_exists systemctl ; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - systemctl is-enabled salt-$fname.service > /dev/null 2>&1 || ( - systemctl preset salt-$fname.service > /dev/null 2>&1 && - systemctl enable salt-$fname.service > /dev/null 2>&1 - ) - else - cat <<_eof > "/etc/init.d/salt-${fname}" -#!/sbin/openrc-run -# Copyright 1999-2015 Gentoo Foundation -# Distributed under the terms of the GNU General Public License v2 - -command="/usr/bin/salt-${fname}" -command_args="\${SALT_OPTS}" -command_background="1" -pidfile="/var/run/salt-${fname}.pid" -name="SALT ${fname} daemon" -retry="20" - -depend() { - use net logger -} -_eof - chmod +x /etc/init.d/salt-$fname - - cat <<_eof > "/etc/conf.d/salt-${fname}" -# /etc/conf.d/salt-${fname}: config file for /etc/init.d/salt-master - -# see man pages for salt-${fname} or run 'salt-${fname} --help' -# for valid cmdline options -SALT_OPTS="--log-level=warning" -_eof - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - rc-update add "salt-$fname" > /dev/null 2>&1 || return 1 - fi - done - - return 0 -} - -install_gentoo_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - # Ensure upstart configs / systemd units are loaded - if __check_command_exists systemctl ; then - systemctl daemon-reload - fi - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = 
"api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if __check_command_exists systemctl ; then - systemctl stop salt-$fname > /dev/null 2>&1 - systemctl start salt-$fname.service && continue - echodebug "Failed to start salt-$fname using systemd" - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - systemctl status salt-$fname.service - journalctl -xe - fi - else - # Disable stdin to fix shell session hang on killing tee pipe - rc-service salt-$fname stop < /dev/null > /dev/null 2>&1 - rc-service salt-$fname start < /dev/null || return 1 - fi - done - - return 0 -} - -install_gentoo_check_services() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if __check_command_exists systemctl ; then - __check_services_systemd salt-$fname || return 1 - else - __check_services_openrc salt-$fname || return 1 - fi - done - - return 0 -} -# -# End of Gentoo Install Functions. 
-# -####################################################################################################################### - -####################################################################################################################### -# -# VoidLinux Install Functions -# -install_voidlinux_stable_deps() { - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - xbps-install -Suy || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - xbps-install -Suy "${_EXTRA_PACKAGES}" || return 1 - fi - - return 0 -} - -install_voidlinux_stable() { - xbps-install -Suy salt || return 1 - return 0 -} - -install_voidlinux_stable_post() { - for fname in master minion syndic; do - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - ln -s /etc/sv/salt-$fname /var/service/. 
- done -} - -install_voidlinux_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in master minion syndic; do - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - sv restart salt-$fname - done -} - -install_voidlinux_check_services() { - for fname in master minion syndic; do - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - [ -e /var/service/salt-$fname ] || return 1 - done - - return 0 -} - -daemons_running_voidlinux() { - [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 - - FAILED_DAEMONS=0 - for fname in master minion syndic; do - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ "$(sv status salt-$fname | grep run)" = "" ]; then - echoerror "salt-$fname was not found running" - FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) - fi - done - - return $FAILED_DAEMONS -} -# -# Ended VoidLinux Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# OS X / Darwin Install Functions -# - -__macosx_get_packagesite() { - DARWIN_ARCH="x86_64" - - __PY_VERSION_REPO="py2" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - PKG="salt-${STABLE_REV}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" - SALTPKGCONFURL="https://repo.saltproject.io/osx/${PKG}" -} - -# Using a 
separate conf step to head for idempotent install... -__configure_macosx_pkg_details() { - __macosx_get_packagesite || return 1 - return 0 -} - -install_macosx_stable_deps() { - __configure_macosx_pkg_details || return 1 - return 0 -} - -install_macosx_git_deps() { - install_macosx_stable_deps || return 1 - - if ! echo "$PATH" | grep -q /usr/local/bin; then - echowarn "/usr/local/bin was not found in \$PATH. Adding it for the duration of the script execution." - export PATH=/usr/local/bin:$PATH - fi - - __fetch_url "/tmp/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1 - - if [ -n "$_PY_EXE" ]; then - _PYEXE=${_PY_EXE} - else - _PYEXE=python2.7 - fi - - # Install PIP - $_PYEXE /tmp/get-pip.py || return 1 - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - return 0 - fi - - __PIP_REQUIREMENTS="dev_python27.txt" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PIP_REQUIREMENTS="dev_python34.txt" - fi - - requirements_file="${_SALT_GIT_CHECKOUT_DIR}/requirements/${__PIP_REQUIREMENTS}" - pip install -U -r "${requirements_file}" --install-option="--prefix=/opt/salt" || return 1 - - return 0 -} - -install_macosx_stable() { - install_macosx_stable_deps || return 1 - - __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1 - - /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1 - - return 0 -} - -install_macosx_git() { - - if [ -n "$_PY_EXE" ]; then - _PYEXE=${_PY_EXE} - else - _PYEXE=python2.7 - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 - return 0 - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - $_PYEXE setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/opt/salt || return 1 - else - $_PYEXE setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/opt/salt || return 1 - fi - - return 0 -} - -install_macosx_stable_post() 
{ - if [ ! -f /etc/paths.d/salt ]; then - print "%s\n" "/opt/salt/bin" "/usr/local/sbin" > /etc/paths.d/salt - fi - - # Don'f fail because of unknown variable on the next step - set +o nounset - # shellcheck disable=SC1091 - . /etc/profile - # Revert nounset to it's previous state - set -o nounset - - return 0 -} - -install_macosx_git_post() { - install_macosx_stable_post || return 1 - return 0 -} - -install_macosx_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 - /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 - - return 0 -} -# -# Ended OS X / Darwin Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# Default minion configuration function. Matches ANY distribution as long as -# the -c options is passed. -# -config_salt() { - # If the configuration directory is not passed, return - [ "$_TEMP_CONFIG_DIR" = "null" ] && return - - if [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then - echowarn "Passing -C (config only) option implies -F (forced overwrite)." - - if [ "$_FORCE_OVERWRITE" -ne $BS_TRUE ]; then - echowarn "Overwriting configs in 11 seconds!" 
- sleep 11 - _FORCE_OVERWRITE=$BS_TRUE - fi - fi - - # Let's create the necessary directories - [ -d "$_SALT_ETC_DIR" ] || mkdir "$_SALT_ETC_DIR" || return 1 - [ -d "$_PKI_DIR" ] || (mkdir -p "$_PKI_DIR" && chmod 700 "$_PKI_DIR") || return 1 - - # If -C or -F was passed, we don't need a .bak file for the config we're updating - # This is used in the custom master/minion config file checks below - CREATE_BAK=$BS_TRUE - if [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then - CREATE_BAK=$BS_FALSE - fi - - CONFIGURED_ANYTHING=$BS_FALSE - - # Copy the grains file if found - if [ -f "$_TEMP_CONFIG_DIR/grains" ]; then - echodebug "Moving provided grains file from $_TEMP_CONFIG_DIR/grains to $_SALT_ETC_DIR/grains" - __movefile "$_TEMP_CONFIG_DIR/grains" "$_SALT_ETC_DIR/grains" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - - if [ "$_INSTALL_MINION" -eq $BS_TRUE ] || \ - [ "$_CONFIG_ONLY" -eq $BS_TRUE ] || [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then - # Create the PKI directory - [ -d "$_PKI_DIR/minion" ] || (mkdir -p "$_PKI_DIR/minion" && chmod 700 "$_PKI_DIR/minion") || return 1 - - # Check to see if a custom minion config json dict was provided - if [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then - - # Check if a minion config file already exists and move to .bak if needed - if [ -f "$_SALT_ETC_DIR/minion" ] && [ "$CREATE_BAK" -eq "$BS_TRUE" ]; then - __movefile "$_SALT_ETC_DIR/minion" "$_SALT_ETC_DIR/minion.bak" $BS_TRUE || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - - # Overwrite/create the config file with the yaml string - __overwriteconfig "$_SALT_ETC_DIR/minion" "$_CUSTOM_MINION_CONFIG" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - - # Copy the minions configuration if found - # Explicitly check for custom master config to avoid moving the minion config - elif [ -f "$_TEMP_CONFIG_DIR/minion" ] && [ "$_CUSTOM_MASTER_CONFIG" = "null" ]; then - __movefile "$_TEMP_CONFIG_DIR/minion" "$_SALT_ETC_DIR" "$_FORCE_OVERWRITE" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - - # 
Copy the minion's keys if found - if [ -f "$_TEMP_CONFIG_DIR/minion.pem" ]; then - __movefile "$_TEMP_CONFIG_DIR/minion.pem" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 - chmod 400 "$_PKI_DIR/minion/minion.pem" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - if [ -f "$_TEMP_CONFIG_DIR/minion.pub" ]; then - __movefile "$_TEMP_CONFIG_DIR/minion.pub" "$_PKI_DIR/minion/" "$_FORCE_OVERWRITE" || return 1 - chmod 664 "$_PKI_DIR/minion/minion.pub" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - # For multi-master-pki, copy the master_sign public key if found - if [ -f "$_TEMP_CONFIG_DIR/master_sign.pub" ]; then - __movefile "$_TEMP_CONFIG_DIR/master_sign.pub" "$_PKI_DIR/minion/" || return 1 - chmod 664 "$_PKI_DIR/minion/master_sign.pub" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - fi - - # only (re)place master or syndic configs if -M (install master) or -S - # (install syndic) specified - OVERWRITE_MASTER_CONFIGS=$BS_FALSE - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then - OVERWRITE_MASTER_CONFIGS=$BS_TRUE - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ] && [ "$_CONFIG_ONLY" -eq $BS_TRUE ]; then - OVERWRITE_MASTER_CONFIGS=$BS_TRUE - fi - - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ] || [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ] || [ "$OVERWRITE_MASTER_CONFIGS" -eq $BS_TRUE ] || [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then - # Create the PKI directory - [ -d "$_PKI_DIR/master" ] || (mkdir -p "$_PKI_DIR/master" && chmod 700 "$_PKI_DIR/master") || return 1 - - # Check to see if a custom master config json dict was provided - if [ "$_CUSTOM_MASTER_CONFIG" != "null" ]; then - - # Check if a master config file already exists and move to .bak if needed - if [ -f "$_SALT_ETC_DIR/master" ] && [ "$CREATE_BAK" -eq "$BS_TRUE" ]; then - __movefile "$_SALT_ETC_DIR/master" "$_SALT_ETC_DIR/master.bak" $BS_TRUE || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - - # Overwrite/create the config file with the yaml string - __overwriteconfig 
"$_SALT_ETC_DIR/master" "$_CUSTOM_MASTER_CONFIG" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - - # Copy the masters configuration if found - elif [ -f "$_TEMP_CONFIG_DIR/master" ]; then - __movefile "$_TEMP_CONFIG_DIR/master" "$_SALT_ETC_DIR" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - - # Copy the master's keys if found - if [ -f "$_TEMP_CONFIG_DIR/master.pem" ]; then - __movefile "$_TEMP_CONFIG_DIR/master.pem" "$_PKI_DIR/master/" || return 1 - chmod 400 "$_PKI_DIR/master/master.pem" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - if [ -f "$_TEMP_CONFIG_DIR/master.pub" ]; then - __movefile "$_TEMP_CONFIG_DIR/master.pub" "$_PKI_DIR/master/" || return 1 - chmod 664 "$_PKI_DIR/master/master.pub" || return 1 - CONFIGURED_ANYTHING=$BS_TRUE - fi - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - # Recursively copy salt-cloud configs with overwriting if necessary - for file in "$_TEMP_CONFIG_DIR"/cloud*; do - if [ -f "$file" ]; then - __copyfile "$file" "$_SALT_ETC_DIR" || return 1 - elif [ -d "$file" ]; then - subdir="$(basename "$file")" - mkdir -p "$_SALT_ETC_DIR/$subdir" - for file_d in "$_TEMP_CONFIG_DIR/$subdir"/*; do - if [ -f "$file_d" ]; then - __copyfile "$file_d" "$_SALT_ETC_DIR/$subdir" || return 1 - fi - done - fi - done - fi - - if [ "$_CONFIG_ONLY" -eq $BS_TRUE ] && [ $CONFIGURED_ANYTHING -eq $BS_FALSE ]; then - echowarn "No configuration or keys were copied over. No configuration was done!" - exit 0 - fi - - return 0 -} -# -# Ended Default Configuration function -# -####################################################################################################################### - -####################################################################################################################### -# -# Default salt master minion keys pre-seed function. Matches ANY distribution -# as long as the -k option is passed. 
-# -preseed_master() { - # Create the PKI directory - - if [ "$(find "$_TEMP_KEYS_DIR" -maxdepth 1 -type f | wc -l)" -lt 1 ]; then - echoerror "No minion keys were uploaded. Unable to pre-seed master" - return 1 - fi - - SEED_DEST="$_PKI_DIR/master/minions" - [ -d "$SEED_DEST" ] || (mkdir -p "$SEED_DEST" && chmod 700 "$SEED_DEST") || return 1 - - for keyfile in "$_TEMP_KEYS_DIR"/*; do - keyfile=$(basename "${keyfile}") - src_keyfile="${_TEMP_KEYS_DIR}/${keyfile}" - dst_keyfile="${SEED_DEST}/${keyfile}" - - # If it's not a file, skip to the next - [ ! -f "$src_keyfile" ] && continue - - __movefile "$src_keyfile" "$dst_keyfile" || return 1 - chmod 664 "$dst_keyfile" || return 1 - done - - return 0 -} -# -# Ended Default Salt Master Pre-Seed minion keys function -# -####################################################################################################################### - -####################################################################################################################### -# -# This function checks if all of the installed daemons are running or not. 
-# -daemons_running() { - [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 - - FAILED_DAEMONS=0 - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - # shellcheck disable=SC2009 - if [ "${DISTRO_NAME}" = "SmartOS" ]; then - if [ "$(svcs -Ho STA salt-$fname)" != "ON" ]; then - echoerror "salt-$fname was not found running" - FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) - fi - elif [ "$(ps wwwaux | grep -v grep | grep salt-$fname)" = "" ]; then - echoerror "salt-$fname was not found running" - FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) - fi - done - - return $FAILED_DAEMONS -} -# -# Ended daemons running check function -# -####################################################################################################################### - -#====================================================================================================================== -# LET'S PROCEED WITH OUR INSTALLATION -#====================================================================================================================== - -# Let's get the dependencies install function -DEP_FUNC_NAMES="" -if [ ${_NO_DEPS} -eq $BS_FALSE ]; then - DEP_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_deps" - DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_deps" - DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_deps" - DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_deps" - DEP_FUNC_NAMES="$DEP_FUNC_NAMES 
install_${DISTRO_NAME_L}_${ITYPE}_deps" - DEP_FUNC_NAMES="$DEP_FUNC_NAMES install_${DISTRO_NAME_L}_deps" -fi - -DEPS_INSTALL_FUNC="null" -for FUNC_NAME in $(__strip_duplicates "$DEP_FUNC_NAMES"); do - if __function_defined "$FUNC_NAME"; then - DEPS_INSTALL_FUNC="$FUNC_NAME" - break - fi -done -echodebug "DEPS_INSTALL_FUNC=${DEPS_INSTALL_FUNC}" - -# Let's get the Salt config function -CONFIG_FUNC_NAMES="config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_salt" -CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_salt" -CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_salt" -CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_salt" -CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_${ITYPE}_salt" -CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_${DISTRO_NAME_L}_salt" -CONFIG_FUNC_NAMES="$CONFIG_FUNC_NAMES config_salt" - -CONFIG_SALT_FUNC="null" -for FUNC_NAME in $(__strip_duplicates "$CONFIG_FUNC_NAMES"); do - if __function_defined "$FUNC_NAME"; then - CONFIG_SALT_FUNC="$FUNC_NAME" - break - fi -done -echodebug "CONFIG_SALT_FUNC=${CONFIG_SALT_FUNC}" - -# Let's get the pre-seed master function -PRESEED_FUNC_NAMES="preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_master" -PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_master" -PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_master" -PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_master" -PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_${ITYPE}_master" -PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_${DISTRO_NAME_L}_master" 
-PRESEED_FUNC_NAMES="$PRESEED_FUNC_NAMES preseed_master" - -PRESEED_MASTER_FUNC="null" -for FUNC_NAME in $(__strip_duplicates "$PRESEED_FUNC_NAMES"); do - if __function_defined "$FUNC_NAME"; then - PRESEED_MASTER_FUNC="$FUNC_NAME" - break - fi -done -echodebug "PRESEED_MASTER_FUNC=${PRESEED_MASTER_FUNC}" - -# Let's get the install function -INSTALL_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" -INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" -INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}" - -INSTALL_FUNC="null" -for FUNC_NAME in $(__strip_duplicates "$INSTALL_FUNC_NAMES"); do - if __function_defined "$FUNC_NAME"; then - INSTALL_FUNC="$FUNC_NAME" - break - fi -done -echodebug "INSTALL_FUNC=${INSTALL_FUNC}" - -# Let's get the post install function -POST_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_post" -POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_post" -POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_post" -POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_post" -POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_post" -POST_FUNC_NAMES="$POST_FUNC_NAMES install_${DISTRO_NAME_L}_post" - -POST_INSTALL_FUNC="null" -for FUNC_NAME in $(__strip_duplicates "$POST_FUNC_NAMES"); do - if __function_defined "$FUNC_NAME"; then - POST_INSTALL_FUNC="$FUNC_NAME" - break - fi -done -echodebug "POST_INSTALL_FUNC=${POST_INSTALL_FUNC}" - -# Let's get the start daemons install function -STARTDAEMONS_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_restart_daemons" -STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES 
install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_restart_daemons" -STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_restart_daemons" -STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_restart_daemons" -STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_restart_daemons" -STARTDAEMONS_FUNC_NAMES="$STARTDAEMONS_FUNC_NAMES install_${DISTRO_NAME_L}_restart_daemons" - -STARTDAEMONS_INSTALL_FUNC="null" -for FUNC_NAME in $(__strip_duplicates "$STARTDAEMONS_FUNC_NAMES"); do - if __function_defined "$FUNC_NAME"; then - STARTDAEMONS_INSTALL_FUNC="$FUNC_NAME" - break - fi -done -echodebug "STARTDAEMONS_INSTALL_FUNC=${STARTDAEMONS_INSTALL_FUNC}" - -# Let's get the daemons running check function. -DAEMONS_RUNNING_FUNC_NAMES="daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" -DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" -DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}" -DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}" -DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}_${ITYPE}" -DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}" -DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running" - -DAEMONS_RUNNING_FUNC="null" -for FUNC_NAME in $(__strip_duplicates "$DAEMONS_RUNNING_FUNC_NAMES"); do - if __function_defined "$FUNC_NAME"; then - DAEMONS_RUNNING_FUNC="$FUNC_NAME" - break - fi -done -echodebug "DAEMONS_RUNNING_FUNC=${DAEMONS_RUNNING_FUNC}" - -# 
Let's get the check services function -if [ ${_DISABLE_SALT_CHECKS} -eq $BS_FALSE ]; then - CHECK_SERVICES_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}_check_services" - CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}_check_services" - CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_check_services" - CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_check_services" - CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}_check_services" - CHECK_SERVICES_FUNC_NAMES="$CHECK_SERVICES_FUNC_NAMES install_${DISTRO_NAME_L}_check_services" -else - CHECK_SERVICES_FUNC_NAMES="" -fi - -CHECK_SERVICES_FUNC="null" -for FUNC_NAME in $(__strip_duplicates "$CHECK_SERVICES_FUNC_NAMES"); do - if __function_defined "$FUNC_NAME"; then - CHECK_SERVICES_FUNC="$FUNC_NAME" - break - fi -done -echodebug "CHECK_SERVICES_FUNC=${CHECK_SERVICES_FUNC}" - -if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ "$DEPS_INSTALL_FUNC" = "null" ]; then - echoerror "No dependencies installation function found. Exiting..." - exit 1 -fi - -if [ "$INSTALL_FUNC" = "null" ]; then - echoerror "No installation function found. Exiting..." - exit 1 -fi - - -# Install dependencies -if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_FALSE ]; then - # Only execute function is not in config mode only - echoinfo "Running ${DEPS_INSTALL_FUNC}()" - if ! ${DEPS_INSTALL_FUNC}; then - echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!" - exit 1 - fi -fi - - -if [ "${ITYPE}" = "git" ] && [ ${_NO_DEPS} -eq ${BS_TRUE} ]; then - if ! __git_clone_and_checkout; then - echo "Failed to clone and checkout git repository." 
- exit 1 - fi -fi - - -# Triggering config_salt() if overwriting master or minion configs -if [ "$_CUSTOM_MASTER_CONFIG" != "null" ] || [ "$_CUSTOM_MINION_CONFIG" != "null" ]; then - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="$_SALT_ETC_DIR" - fi - - if [ ${_NO_DEPS} -eq $BS_FALSE ] && [ $_CONFIG_ONLY -eq $BS_TRUE ]; then - # Execute function to satisfy dependencies for configuration step - echoinfo "Running ${DEPS_INSTALL_FUNC}()" - if ! ${DEPS_INSTALL_FUNC}; then - echoerror "Failed to run ${DEPS_INSTALL_FUNC}()!!!" - exit 1 - fi - fi -fi - -# Configure Salt -if [ "$CONFIG_SALT_FUNC" != "null" ] && [ "$_TEMP_CONFIG_DIR" != "null" ]; then - echoinfo "Running ${CONFIG_SALT_FUNC}()" - if ! ${CONFIG_SALT_FUNC}; then - echoerror "Failed to run ${CONFIG_SALT_FUNC}()!!!" - exit 1 - fi -fi - -# Drop the master address if passed -if [ "$_SALT_MASTER_ADDRESS" != "null" ]; then - [ ! -d "$_SALT_ETC_DIR/minion.d" ] && mkdir -p "$_SALT_ETC_DIR/minion.d" - cat <<_eof > "$_SALT_ETC_DIR/minion.d/99-master-address.conf" -master: $_SALT_MASTER_ADDRESS -_eof -fi - -# Drop the minion id if passed -if [ "$_SALT_MINION_ID" != "null" ]; then - [ ! -d "$_SALT_ETC_DIR" ] && mkdir -p "$_SALT_ETC_DIR" - echo "$_SALT_MINION_ID" > "$_SALT_ETC_DIR/minion_id" -fi - -# Pre-seed master keys -if [ "$PRESEED_MASTER_FUNC" != "null" ] && [ "$_TEMP_KEYS_DIR" != "null" ]; then - echoinfo "Running ${PRESEED_MASTER_FUNC}()" - if ! ${PRESEED_MASTER_FUNC}; then - echoerror "Failed to run ${PRESEED_MASTER_FUNC}()!!!" - exit 1 - fi -fi - -# Install Salt -if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - # Only execute function is not in config mode only - echoinfo "Running ${INSTALL_FUNC}()" - if ! ${INSTALL_FUNC}; then - echoerror "Failed to run ${INSTALL_FUNC}()!!!" - exit 1 - fi -fi - -# Run any post install function. 
Only execute function if not in config mode only -if [ "$POST_INSTALL_FUNC" != "null" ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoinfo "Running ${POST_INSTALL_FUNC}()" - if ! ${POST_INSTALL_FUNC}; then - echoerror "Failed to run ${POST_INSTALL_FUNC}()!!!" - exit 1 - fi -fi - -# Run any check services function, Only execute function if not in config mode only -if [ "$CHECK_SERVICES_FUNC" != "null" ] && [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoinfo "Running ${CHECK_SERVICES_FUNC}()" - if ! ${CHECK_SERVICES_FUNC}; then - echoerror "Failed to run ${CHECK_SERVICES_FUNC}()!!!" - exit 1 - fi -fi - -# Run any start daemons function -if [ "$STARTDAEMONS_INSTALL_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; then - echoinfo "Running ${STARTDAEMONS_INSTALL_FUNC}()" - echodebug "Waiting ${_SLEEP} seconds for processes to settle before checking for them" - sleep ${_SLEEP} - if ! ${STARTDAEMONS_INSTALL_FUNC}; then - echoerror "Failed to run ${STARTDAEMONS_INSTALL_FUNC}()!!!" - exit 1 - fi -fi - -# Check if the installed daemons are running or not -if [ "$DAEMONS_RUNNING_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; then - echoinfo "Running ${DAEMONS_RUNNING_FUNC}()" - echodebug "Waiting ${_SLEEP} seconds for processes to settle before checking for them" - sleep ${_SLEEP} # Sleep a little bit to let daemons start - if ! ${DAEMONS_RUNNING_FUNC}; then - echoerror "Failed to run ${DAEMONS_RUNNING_FUNC}()!!!" - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ "$_ECHO_DEBUG" -eq $BS_FALSE ]; then - echoerror "salt-$fname was not found running. 
Pass '-D' to ${__ScriptName} when bootstrapping for additional debugging information..." - continue - fi - - [ ! -f "$_SALT_ETC_DIR/$fname" ] && [ $fname != "syndic" ] && echodebug "$_SALT_ETC_DIR/$fname does not exist" - - echodebug "Running salt-$fname by hand outputs: $(nohup salt-$fname -l debug)" - - [ ! -f /var/log/salt/$fname ] && echodebug "/var/log/salt/$fname does not exist. Can't cat its contents!" && continue - - echodebug "DAEMON LOGS for $fname:" - echodebug "$(cat /var/log/salt/$fname)" - echo - done - - echodebug "Running Processes:" - echodebug "$(ps auxwww)" - - exit 1 - fi -fi - -# Done! -if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then - echoinfo "Salt installed!" -else - echoinfo "Salt configured!" -fi - -exit 0 - -# vim: set sts=4 ts=4 et diff --git a/salt/salt/scripts/bs_dev b/salt/salt/scripts/bs_dev deleted file mode 100644 index e69de29bb..000000000 diff --git a/salt/salt/scripts/bs_stable b/salt/salt/scripts/bs_stable deleted file mode 100644 index e69de29bb..000000000 From 2d688331df675eee946c659a24f2e95c7959a59c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 12 Oct 2023 15:32:04 -0400 Subject: [PATCH 190/417] handle version install for stable and onedir install type --- salt/salt/scripts/bootstrap-salt.sh | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index 464243c84..d422a5295 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -5130,7 +5130,11 @@ install_centos_onedir() { local master='salt-master' local minion='salt-minion' local syndic='salt-syndic' - local ver="$_ONEDIR_REV" + if [ "$ITYPE" = "stable" ]; then + local ver="$_ONEDIR_REV" + elif [ "$ITYPE" = "onedir" ]; then + local ver="${ONEDIR_REV##*/}" + fi if [ ! 
-z $ver ]; then cloud+="-$ver" From 49a651fd72deb8e15affdd9a2ca6fc866641283a Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 12 Oct 2023 15:43:22 -0400 Subject: [PATCH 191/417] adjust var name --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 17c62af81..60296d2f3 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -91,7 +91,7 @@ fi # if packages are updated and the box isn't rebooted if [[ $is_debian ]]; then update_packages - if [[ -f "/var/run/reboot-required" ]] && [ -z "$TESTING" ]; then + if [[ -f "/var/run/reboot-required" ]] && [ -z "$test_profile" ]; then whiptail_debian_reboot_required reboot fi From b6af59d9b09c8ea405275cdebedd9ec000ca042c Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 12 Oct 2023 15:47:53 -0400 Subject: [PATCH 192/417] 2.4.20 hotfix --- DOWNLOAD_AND_VERIFY_ISO.md | 22 ++++++++++----------- sigs/securityonion-2.4.20-20231012.iso.sig | Bin 0 -> 566 bytes 2 files changed, 11 insertions(+), 11 deletions(-) create mode 100644 sigs/securityonion-2.4.20-20231012.iso.sig diff --git a/DOWNLOAD_AND_VERIFY_ISO.md b/DOWNLOAD_AND_VERIFY_ISO.md index dabfd285c..539dd9e8e 100644 --- a/DOWNLOAD_AND_VERIFY_ISO.md +++ b/DOWNLOAD_AND_VERIFY_ISO.md @@ -1,18 +1,18 @@ -### 2.4.20-20231006 ISO image released on 2023/10/06 +### 2.4.20-20231012 ISO image released on 2023/10/12 ### Download and Verify -2.4.20-20231006 ISO image: -https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso +2.4.20-20231012 ISO image: +https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231012.iso -MD5: 269F00308C53976BF0EAE788D1DB29DB -SHA1: 3F7C2324AE1271112F3B752BA4724AF36688FC27 -SHA256: 542B8B3F4F75AD24DC78007F8FE0857E00DC4CC9F4870154DCB8D5D0C4144B65 +MD5: 7D6ACA843068BA9432B3FF63BFD1EF0F +SHA1: BEF2B906066A1B04921DF0B80E7FDD4BC8ECED5C +SHA256: 5D511D50F11666C69AE12435A47B9A2D30CB3CC88F8D38DC58A5BC0ECADF1BF5 Signature for ISO image: 
-https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig +https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231012.iso.sig Signing key: https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2.4/main/KEYS @@ -26,22 +26,22 @@ wget https://raw.githubusercontent.com/Security-Onion-Solutions/securityonion/2. Download the signature file for the ISO: ``` -wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231006.iso.sig +wget https://github.com/Security-Onion-Solutions/securityonion/raw/2.4/main/sigs/securityonion-2.4.20-20231012.iso.sig ``` Download the ISO image: ``` -wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231006.iso +wget https://download.securityonion.net/file/securityonion/securityonion-2.4.20-20231012.iso ``` Verify the downloaded ISO image using the signature file: ``` -gpg --verify securityonion-2.4.20-20231006.iso.sig securityonion-2.4.20-20231006.iso +gpg --verify securityonion-2.4.20-20231012.iso.sig securityonion-2.4.20-20231012.iso ``` The output should show "Good signature" and the Primary key fingerprint should match what's shown below: ``` -gpg: Signature made Tue 03 Oct 2023 11:40:51 AM EDT using RSA key ID FE507013 +gpg: Signature made Thu 12 Oct 2023 01:28:32 PM EDT using RSA key ID FE507013 gpg: Good signature from "Security Onion Solutions, LLC " gpg: WARNING: This key is not certified with a trusted signature! gpg: There is no indication that the signature belongs to the owner. 
diff --git a/sigs/securityonion-2.4.20-20231012.iso.sig b/sigs/securityonion-2.4.20-20231012.iso.sig new file mode 100644 index 0000000000000000000000000000000000000000..0704f7d1c253915a81383222f6238a3d5d62bdc7 GIT binary patch literal 566 zcmV-60?GY}0y6{v0SEvc79j-41gSkXz6^6dp_W8^5Ma0dP;e6k0%a&HzyJyf5PT3| zxBgIY6GqGr|4VA;xMiecEI{gjUYL2W<40M5tH3?g_4Y`oAyjO0cF^+WeS(7J&}s@- zOx2!={+X!hztM{3;w{PuK!3{CmAU`PQ&$_PYvXIryf9AkQx<`1Y-n?Q4}$ReYDY3- zkL(mYW^n4{y1r-VgH^6u3?|#b4##(wrBYf2cnH-$P@^;F?}mn^Shy*wY1|{RxVK>Y zkDh@Zu0{#DHO9VL@2m3obxlgVjXDj#1DCsA3)icR&Ga?8SBYq)3A$0cy(Dt zXwe(*W;p4(pZ6@(4I~+#m-LG}X3CVX>(h6_RFs@|0})09rzd0EGN)L=x8R@##@1yl zTVF>NT-rxZS))jQ$y*ZN_uG}Wzt-83>^~YInB}AP#c_kMtOq^Mq9Fz_pnaBK5n3=- ze5mCQDqMFZ9Cq2l_+r2>`LTS*y7qBP8muMVWwBg4Dkfo?^4ljQ#&zf*JT^%6ZGk$fS=MJ$EP$=Oler)b>|nKO z;BgH?2IF-C&M23kWqbMLwfY-*_FiIR`KjmETbDAfJ!Ut}G&{9jy!%A#P6!6+BY-Uc zNhi2Du?+1A*_2tG5E20s%kvm{T=H}fPuvT@XbP{zF+!aqy+vby-3$^HTHk}9KXsA{ EFqyLvWB>pF literal 0 HcmV?d00001 From 5250292e95361427f04b1df2a60505bb47874cc5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 12 Oct 2023 15:54:22 -0400 Subject: [PATCH 193/417] only allow stable install type. require -r to be used --- salt/salt/scripts/bootstrap-salt.sh | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index d422a5295..f56d6b17b 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -611,6 +611,19 @@ if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git|onedir|onedir_rc|old-stable exit 1 fi +# Due to our modifications to install_centos_onedir it is easiest to just lock down to only allowing stable install +if [ "$(echo "$ITYPE" | grep stable)" = "" ]; then + echoerror "This script has been modified to only support stable installation type..." + exit 1 +fi + +# We want to require this script to only run with -r. 
We dont want to accidentally try to install from another repo +# and we dont want to put salt.repo in /etc/yum.repos.d/ +if [ "$_DISABLE_REPOS" -eq $BS_FALSE ];then + echoerror "This script has been modified to required the usage of the -r flag which disables this script from using its own repos..." + exit 1 +fi + # If doing a git install, check what branch/tag/sha will be checked out if [ "$ITYPE" = "git" ]; then if [ "$#" -eq 0 ];then @@ -5123,6 +5136,8 @@ install_centos_onedir_deps() { return 0 } +# This function has been modified to allow for specific versions to be installed +# when not using the salt repo install_centos_onedir() { __PACKAGES="" @@ -5130,11 +5145,7 @@ install_centos_onedir() { local master='salt-master' local minion='salt-minion' local syndic='salt-syndic' - if [ "$ITYPE" = "stable" ]; then - local ver="$_ONEDIR_REV" - elif [ "$ITYPE" = "onedir" ]; then - local ver="${ONEDIR_REV##*/}" - fi + local ver="$_ONEDIR_REV" if [ ! -z $ver ]; then cloud+="-$ver" From d2002a51587d06439eb37e68adafe0937d0a4943 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 12 Oct 2023 15:58:33 -0400 Subject: [PATCH 194/417] add additional comments --- salt/salt/scripts/bootstrap-salt.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index f56d6b17b..156489f4e 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -613,7 +613,7 @@ fi # Due to our modifications to install_centos_onedir it is easiest to just lock down to only allowing stable install if [ "$(echo "$ITYPE" | grep stable)" = "" ]; then - echoerror "This script has been modified to only support stable installation type..." + echoerror "This script has been modified to only support stable installation type. Installation type \"$ITYPE\" is not allowed..." 
exit 1 fi From 6c5f8e4e2d2a3ada0ee87f13b3304ab42fae8c7f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 12 Oct 2023 16:19:59 -0400 Subject: [PATCH 195/417] Update HOTFIX --- HOTFIX | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/HOTFIX b/HOTFIX index afd2e4c40..d3f5a12fa 100644 --- a/HOTFIX +++ b/HOTFIX @@ -1 +1 @@ -20231012 + From 1641aa111b2f2f4b86e75bb28aec5c5332a23051 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 13 Oct 2023 13:46:31 -0400 Subject: [PATCH 196/417] add checkmine back --- salt/salt/engines/checkmine.py | 28 ++++++++++++++++++++++++++++ salt/salt/files/engines.conf | 6 ++++++ salt/salt/master.sls | 7 +++++-- 3 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 salt/salt/engines/checkmine.py create mode 100644 salt/salt/files/engines.conf diff --git a/salt/salt/engines/checkmine.py b/salt/salt/engines/checkmine.py new file mode 100644 index 000000000..851d9a555 --- /dev/null +++ b/salt/salt/engines/checkmine.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +import logging +from time import sleep +from os import remove + +log = logging.getLogger(__name__) + +def start(interval=30): + log.info("checkmine engine started") + minionid = __grains__['id'] + while True: + try: + ca_crt = __salt__['saltutil.runner']('mine.get', tgt=minionid, fun='x509.get_pem_entries')[minionid]['/etc/pki/ca.crt'] + log.info('Successfully queried Salt mine for the CA.') + except: + log.error('Could not pull CA from the Salt mine.') + log.info('Removing /var/cache/salt/master/minions/%s/mine.p to force Salt mine to be repopulated.' 
% minionid) + try: + remove('/var/cache/salt/master/minions/%s/mine.p' % minionid) + log.info('Removed /var/cache/salt/master/minions/%s/mine.p' % minionid) + except FileNotFoundError: + log.error('/var/cache/salt/master/minions/%s/mine.p does not exist' % minionid) + + __salt__['mine.send'](name='x509.get_pem_entries', glob_path='/etc/pki/ca.crt') + log.warning('Salt mine repopulated with /etc/pki/ca.crt') + + sleep(interval) diff --git a/salt/salt/files/engines.conf b/salt/salt/files/engines.conf new file mode 100644 index 000000000..878aa9c49 --- /dev/null +++ b/salt/salt/files/engines.conf @@ -0,0 +1,6 @@ +engines_dirs: + - /etc/salt/engines + +engines: + - checkmine: + interval: 30 diff --git a/salt/salt/master.sls b/salt/salt/master.sls index b10a4df0f..1e0e6e303 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -18,14 +18,17 @@ salt_master_service: - enable: True checkmine_engine: - file.absent: + file.managed: - name: /etc/salt/engines/checkmine.py + - source: salt://salt/engines/checkmine.py + - makedirs: True - watch_in: - service: salt_minion_service engines_config: - file.absent: + file.managed: - name: /etc/salt/minion.d/engines.conf + - source: salt://salt/files/engines.conf - watch_in: - service: salt_minion_service From 57684efddfe99b92b5297fd6435d8eed0e47cfa8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 13 Oct 2023 16:23:16 -0400 Subject: [PATCH 197/417] checkmine looks for 1 byte file and verify mine ip is correct --- salt/salt/engines/checkmine.py | 58 +++++++++++++++++++++++----------- 1 file changed, 39 insertions(+), 19 deletions(-) diff --git a/salt/salt/engines/checkmine.py b/salt/salt/engines/checkmine.py index 851d9a555..54f66a5ed 100644 --- a/salt/salt/engines/checkmine.py +++ b/salt/salt/engines/checkmine.py @@ -2,27 +2,47 @@ import logging from time import sleep -from os import remove +import os +import salt.client log = logging.getLogger(__name__) +local = salt.client.LocalClient() -def start(interval=30): - 
log.info("checkmine engine started") - minionid = __grains__['id'] - while True: - try: - ca_crt = __salt__['saltutil.runner']('mine.get', tgt=minionid, fun='x509.get_pem_entries')[minionid]['/etc/pki/ca.crt'] - log.info('Successfully queried Salt mine for the CA.') - except: - log.error('Could not pull CA from the Salt mine.') - log.info('Removing /var/cache/salt/master/minions/%s/mine.p to force Salt mine to be repopulated.' % minionid) - try: - remove('/var/cache/salt/master/minions/%s/mine.p' % minionid) - log.info('Removed /var/cache/salt/master/minions/%s/mine.p' % minionid) - except FileNotFoundError: - log.error('/var/cache/salt/master/minions/%s/mine.p does not exist' % minionid) +def start(interval=10): + def mine_flush(minion): + log.warning('checkmine engine: flushing mine cache for %s' % minion) + local.cmd(minion, 'mine.flush') - __salt__['mine.send'](name='x509.get_pem_entries', glob_path='/etc/pki/ca.crt') - log.warning('Salt mine repopulated with /etc/pki/ca.crt') + def mine_update(minion): + log.warning('checkmine engine: updating mine cache for %s' % minion) + local.cmd(minion, 'mine.update') - sleep(interval) + log.info("checkmine engine: started") + cachedir = __opts__['cachedir'] + while True: + log.debug('checkmine engine: checking which minions are alive') + manage_alived = __salt__['saltutil.runner']('manage.alived', show_ip=True) + log.debug('checkmine engine: alive minions: %s' % ' , '.join(manage_alived)) + + for minion in manage_alived: + mine_path = os.path.join(cachedir, 'minions', minion, 'mine.p') + mine_size = os.path.getsize(mine_path) + log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size)) + # For some reason the mine file can be corrupt and only be 1 byte in size + if mine_size == 1: + log.error('checkmine engine: found %s to be 1 byte' % mine_path) + mine_flush(minion) + mine_update(minion) + # Update the mine if the ip in the mine doesn't match returned from manage.alived + else: + network_ip_addrs = 
__salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs') + mine_ip = network_ip_addrs[minion][0] + log.debug('checkmine engine: minion: %s mine_ip: %s' % (minion, mine_ip)) + manage_alived_ip = manage_alived[minion] + log.debug('checkmine engine: minion: %s managed_alived_ip: %s' % (minion, manage_alived_ip)) + if mine_ip != manage_alived_ip: + log.error('checkmine engine: found minion %s has manage_alived_ip %s but a mine_ip of %s' % (minion, manage_alived_ip, mine_ip)) + mine_flush(minion) + mine_update(minion) + + sleep(interval) From e23b3a62f30f2467389ba68578a7056943387f0c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 13 Oct 2023 16:24:11 -0400 Subject: [PATCH 198/417] default interval of 60s --- salt/salt/engines/checkmine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/engines/checkmine.py b/salt/salt/engines/checkmine.py index 54f66a5ed..09e624ba3 100644 --- a/salt/salt/engines/checkmine.py +++ b/salt/salt/engines/checkmine.py @@ -8,7 +8,7 @@ import salt.client log = logging.getLogger(__name__) local = salt.client.LocalClient() -def start(interval=10): +def start(interval=60): def mine_flush(minion): log.warning('checkmine engine: flushing mine cache for %s' % minion) local.cmd(minion, 'mine.flush') From 2773da5a125623cb10a4983394d5862bf8419427 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 16 Oct 2023 10:34:45 -0400 Subject: [PATCH 199/417] run the checkmine engine under master instead of minion --- salt/salt/engines/{ => master}/checkmine.py | 0 salt/salt/files/engines.conf | 2 +- salt/salt/master.sls | 19 +++++++++++++++---- 3 files changed, 16 insertions(+), 5 deletions(-) rename salt/salt/engines/{ => master}/checkmine.py (100%) diff --git a/salt/salt/engines/checkmine.py b/salt/salt/engines/master/checkmine.py similarity index 100% rename from salt/salt/engines/checkmine.py rename to salt/salt/engines/master/checkmine.py diff --git a/salt/salt/files/engines.conf 
b/salt/salt/files/engines.conf index 878aa9c49..7c43e99e1 100644 --- a/salt/salt/files/engines.conf +++ b/salt/salt/files/engines.conf @@ -3,4 +3,4 @@ engines_dirs: engines: - checkmine: - interval: 30 + interval: 60 diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 1e0e6e303..182498678 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -20,17 +20,28 @@ salt_master_service: checkmine_engine: file.managed: - name: /etc/salt/engines/checkmine.py - - source: salt://salt/engines/checkmine.py + - source: salt://salt/engines/master/checkmine.py - makedirs: True - watch_in: - - service: salt_minion_service + - service: salt_master_service -engines_config: - file.managed: +# prior to 2.4.30 this engine ran on the manager with salt-minion +# this has changed to running with the salt-master in 2.4.30 +remove_engines_config: + file.absent: - name: /etc/salt/minion.d/engines.conf - source: salt://salt/files/engines.conf - watch_in: - service: salt_minion_service + - order: last + +engines_config: + file.managed: + - name: /etc/salt/master.d/engines.conf + - source: salt://salt/files/engines.conf + - watch_in: + - service: salt_master_service + - order: last {% else %} From 1a7761c531968ca62edea907b6c615dd39db5d7d Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 16 Oct 2023 11:00:31 -0400 Subject: [PATCH 200/417] display container dl status during soup --- salt/common/tools/sbin/so-image-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 11d2d6366..82487d69b 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -137,7 +137,7 @@ update_docker_containers() { for i in "${TRUSTED_CONTAINERS[@]}" do if [ -z "$PROGRESS_CALLBACK" ]; then - echo "Downloading $i" >> "$LOG_FILE" 2>&1 + echo "Downloading $i" 2>&1 | tee "$LOG_FILE" else $PROGRESS_CALLBACK $i fi From 07902d17cc4fea82cb24af20b16810a18fc6fdb8 
Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 16 Oct 2023 11:20:19 -0400 Subject: [PATCH 201/417] display container dl status during soup --- salt/common/tools/sbin/so-image-common | 2 +- salt/manager/tools/sbin/soup | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common index 82487d69b..7e510e3ad 100755 --- a/salt/common/tools/sbin/so-image-common +++ b/salt/common/tools/sbin/so-image-common @@ -137,7 +137,7 @@ update_docker_containers() { for i in "${TRUSTED_CONTAINERS[@]}" do if [ -z "$PROGRESS_CALLBACK" ]; then - echo "Downloading $i" 2>&1 | tee "$LOG_FILE" + echo "Downloading $i" >> "$LOG_FILE" 2>&1 else $PROGRESS_CALLBACK $i fi diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 998d27539..0e11276ea 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -829,7 +829,7 @@ main() { else update_registry set +e - update_docker_containers "soup" "" "" "$SOUP_LOG" + update_docker_containers 'soup' '' '' '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG" set -e fi From 84c39b5de7b40b59577d7f7e41de78ff0e51e294 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 16 Oct 2023 13:01:13 -0400 Subject: [PATCH 202/417] only add heavynodes to remoteHostUrls --- salt/soc/defaults.map.jinja | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/salt/soc/defaults.map.jinja b/salt/soc/defaults.map.jinja index 2587051c5..83cb5637c 100644 --- a/salt/soc/defaults.map.jinja +++ b/salt/soc/defaults.map.jinja @@ -13,11 +13,13 @@ {% do SOCDEFAULTS.soc.config.server.modules[module].update({'hostUrl': application_url}) %} {% endfor %} -{# add nodes from the logstash:nodes pillar to soc.server.modules.elastic.remoteHostUrls #} +{# add all grid heavy nodes to soc.server.modules.elastic.remoteHostUrls #} {% for node_type, minions in salt['pillar.get']('logstash:nodes', {}).items() %} -{% for m in minions.keys() %} -{% do 
SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %} -{% endfor %} +{% if node_type in ['heavynode'] %} +{% for m in minions.keys() %} +{% do SOCDEFAULTS.soc.config.server.modules.elastic.remoteHostUrls.append('https://' ~ m ~ ':9200') %} +{% endfor %} +{% endif %} {% endfor %} {% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %} From a637b0e61b53976a15035f225197e01f12de50d4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 16 Oct 2023 14:58:58 -0400 Subject: [PATCH 203/417] apply salt.master and minion state early in setup to prevent the services from restarting later in setup --- setup/so-setup | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index 60296d2f3..dc209b756 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -714,6 +714,7 @@ if ! [[ -f $install_opt_file ]]; then logCmd "salt-call state.apply common.packages" logCmd "salt-call state.apply common" + logCmd "salt-call state.apply salt.master" logCmd "salt-call state.apply docker" firewall_generate_templates set_initial_firewall_policy @@ -768,8 +769,6 @@ if ! 
[[ -f $install_opt_file ]]; then checkin_at_boot set_initial_firewall_access logCmd "salt-call schedule.enable -linfo --local" - systemctl restart salt-master - systemctl restart salt-minion verify_setup else touch /root/accept_changes From c0030bc513001530c57ee880708ccf1a8805f8ea Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 16 Oct 2023 15:00:07 -0400 Subject: [PATCH 204/417] dont need to restart minion service when just adding sleep delay on service start --- salt/salt/minion.sls | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/salt/minion.sls b/salt/salt/minion.sls index 865bd367f..e0c422e7f 100644 --- a/salt/salt/minion.sls +++ b/salt/salt/minion.sls @@ -67,6 +67,9 @@ set_log_levels: - "log_level: info" - "log_level_logfile: info" +# prior to 2.4.30 this managed file would restart the salt-minion service when updated +# since this file is currently only adding a sleep timer on service start +# it is not required to restart the service salt_minion_service_unit_file: file.managed: - name: {{ SYSTEMD_UNIT_FILE }} @@ -89,6 +92,5 @@ salt_minion_service: - file: mine_functions {% if INSTALLEDSALTVERSION|string == SALTVERSION|string %} - file: set_log_levels - - file: salt_minion_service_unit_file {% endif %} - order: last From 9f3a9dfab09b1b11b7cbbab152d952babc4e2c16 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 16 Oct 2023 15:00:53 -0400 Subject: [PATCH 205/417] reorder salt.master state --- salt/salt/master.sls | 32 +++++++++++++++----------------- 1 file changed, 15 insertions(+), 17 deletions(-) diff --git a/salt/salt/master.sls b/salt/salt/master.sls index 182498678..0a65f3e01 100644 --- a/salt/salt/master.sls +++ b/salt/salt/master.sls @@ -12,19 +12,6 @@ hold_salt_master_package: - name: salt-master {% endif %} -salt_master_service: - service.running: - - name: salt-master - - enable: True - -checkmine_engine: - file.managed: - - name: /etc/salt/engines/checkmine.py - - source: salt://salt/engines/master/checkmine.py - - 
makedirs: True - - watch_in: - - service: salt_master_service - # prior to 2.4.30 this engine ran on the manager with salt-minion # this has changed to running with the salt-master in 2.4.30 remove_engines_config: @@ -32,15 +19,26 @@ remove_engines_config: - name: /etc/salt/minion.d/engines.conf - source: salt://salt/files/engines.conf - watch_in: - - service: salt_minion_service - - order: last + - service: salt_minion_service + +checkmine_engine: + file.managed: + - name: /etc/salt/engines/checkmine.py + - source: salt://salt/engines/master/checkmine.py + - makedirs: True engines_config: file.managed: - name: /etc/salt/master.d/engines.conf - source: salt://salt/files/engines.conf - - watch_in: - - service: salt_master_service + +salt_master_service: + service.running: + - name: salt-master + - enable: True + - watch: + - file: checkmine_engine + - file: engines_config - order: last {% else %} From e5c936e8cf088fe46597333e6aaa921a99c6ccc9 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 16 Oct 2023 15:18:26 -0400 Subject: [PATCH 206/417] Replace external zeek-community-id with builtin community-id. Disable plugin-tds + plugin-profinet. 
Not updated for Zeek 6.x Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/zeek/defaults.yaml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/salt/zeek/defaults.yaml b/salt/zeek/defaults.yaml index 4435670a2..ad34e1a93 100644 --- a/salt/zeek/defaults.yaml +++ b/salt/zeek/defaults.yaml @@ -49,12 +49,13 @@ zeek: - frameworks/files/hash-all-files - frameworks/files/detect-MHR - policy/frameworks/notice/extend-email/hostnames + - policy/frameworks/notice/community-id + - policy/protocols/conn/community-id-logging - ja3 - hassh - intel - cve-2020-0601 - securityonion/bpfconf - - securityonion/communityid - securityonion/file-extraction - oui-logging - icsnpp-modbus @@ -65,8 +66,8 @@ zeek: - icsnpp-opcua-binary - icsnpp-bsap - icsnpp-s7comm - - zeek-plugin-tds - - zeek-plugin-profinet + # - zeek-plugin-tds + # - zeek-plugin-profinet - zeek-spicy-wireguard - zeek-spicy-stun load-sigs: @@ -75,7 +76,7 @@ zeek: - LogAscii::use_json = T; - CaptureLoss::watch_interval = 5 mins; networks: - HOME_NET: + HOME_NET: - 192.168.0.0/16 - 10.0.0.0/8 - 172.16.0.0/12 @@ -120,4 +121,4 @@ zeek: - stats - stderr - stdout - + From ed693a7ae67e63e547e45a141fc570030749e7a5 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 16 Oct 2023 15:48:51 -0400 Subject: [PATCH 207/417] Remove commented lines in defaults.yaml to avoid UI issues. 
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/zeek/defaults.yaml | 2 -- 1 file changed, 2 deletions(-) diff --git a/salt/zeek/defaults.yaml b/salt/zeek/defaults.yaml index ad34e1a93..ce22c1aef 100644 --- a/salt/zeek/defaults.yaml +++ b/salt/zeek/defaults.yaml @@ -66,8 +66,6 @@ zeek: - icsnpp-opcua-binary - icsnpp-bsap - icsnpp-s7comm - # - zeek-plugin-tds - # - zeek-plugin-profinet - zeek-spicy-wireguard - zeek-spicy-stun load-sigs: From 53fcafea50fb4f7f07928d0252d465560cd86dad Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 16 Oct 2023 16:31:43 -0400 Subject: [PATCH 208/417] redo how we check if salt-master is ready and accessible --- salt/common/tools/sbin/so-common | 22 +++++++++++++++++----- salt/manager/tools/sbin/soup | 12 +++--------- setup/so-functions | 5 ----- setup/so-setup | 5 +++++ 4 files changed, 25 insertions(+), 19 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index f754b34ef..c19d51a42 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -133,11 +133,23 @@ check_elastic_license() { } check_salt_master_status() { - local timeout=$1 - echo "Checking if we can talk to the salt master" - salt-call state.show_top concurrent=true - - return + local count=0 + local attempts="${1:- 10}" + current_time="$(date '+%b %d %H:%M:%S')" + echo "Checking if we can access the salt master and that it is ready at: ${current_time}" + while ! 
salt-call state.show_top -l error concurrent=true 1> /dev/null; do + current_time="$(date '+%b %d %H:%M:%S')" + echo "Can't access salt master or it is not ready at: ${current_time}" + ((count+=1)) + if [[ $count -eq $attempts ]]; then + # 10 attempts takes about 5.5 minutes + echo "Gave up trying to access salt-master" + return 1 + fi + done + current_time="$(date '+%b %d %H:%M:%S')" + echo "Successfully accessed and salt master ready at: ${current_time}" + return 0 } check_salt_minion_status() { diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0e11276ea..84501bad5 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -736,14 +736,8 @@ main() { echo "" set_os - if ! check_salt_master_status; then - echo "Could not talk to salt master" - echo "Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master." - echo "SOUP will now attempt to start the salt-master service and exit." - exit 1 - fi - echo "This node can communicate with the salt-master." + check_salt_master_status 1 || fail "Could not talk to salt master: Please run 'systemctl status salt-master' to ensure the salt-master service is running and check the log at /opt/so/log/salt/master." echo "Checking to see if this is a manager." echo "" @@ -881,7 +875,7 @@ main() { # Testing that salt-master is up by checking that is it connected to itself set +e echo "Waiting on the Salt Master service to be ready." - salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. Check $SOUP_LOG for details." + check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details." set -e # update the salt-minion configs here and start the minion @@ -917,7 +911,7 @@ main() { set +e echo "Waiting on the Salt Master service to be ready." - salt-call state.show_top -l error queue=True || fail "salt-master could not be reached. 
Check $SOUP_LOG for details." + check_salt_master_status || fail "Can't access salt master or it is not ready. Check $SOUP_LOG for details." set -e echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." diff --git a/setup/so-functions b/setup/so-functions index 42a4b4ac6..68fd01550 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2111,11 +2111,6 @@ saltify() { } -# Run a salt command to generate the minion key -salt_firstcheckin() { - salt-call state.show_top >> /dev/null 2>&1 # send output to /dev/null because we don't actually care about the ouput -} - salt_install_module_deps() { logCmd "salt-pip install docker --no-index --only-binary=:all: --find-links files/salt_module_deps/docker/" logCmd "salt-pip install pymysql --no-index --only-binary=:all: --find-links files/salt_module_deps/pymysql/" diff --git a/setup/so-setup b/setup/so-setup index dc209b756..4db24aa1a 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -715,6 +715,11 @@ if ! 
[[ -f $install_opt_file ]]; then logCmd "salt-call state.apply common.packages" logCmd "salt-call state.apply common" logCmd "salt-call state.apply salt.master" + + # wait here until we get a response from the salt-master since it may have just restarted + # exit setup after 5-6 minutes of trying + check_salt_master_status || fail "Can't access salt master or it is not ready" + logCmd "salt-call state.apply docker" firewall_generate_templates set_initial_firewall_policy From 01cb0fccb62fb1cfebdb4be88d9b2201a7cd8559 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 17 Oct 2023 10:01:11 -0400 Subject: [PATCH 209/417] mark suricata 7 log line as fp fo so-log-check --- salt/common/tools/sbin/so-log-check | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index c2d16fd86..395f60c7d 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -136,6 +136,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|invalid query input" # false positive (Invalid user input in hunt query) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. 
app_layer.error.imap.parser | Total | 0 fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then @@ -230,4 +231,4 @@ else echo -e "\nResult: One or more errors found" fi -exit $RESULT \ No newline at end of file +exit $RESULT From 7c2cdb78e92d58ab0ef5363a2098915394674d11 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 10:31:53 -0400 Subject: [PATCH 210/417] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 8ea99f559..7d52aac7f 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.30 +2.4.0-foxtrot From 1db88bdbb59357988c4e335b5a908785b93a101c Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 10:33:39 -0400 Subject: [PATCH 211/417] Update so-common --- salt/common/tools/sbin/so-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index c19d51a42..a2f11555a 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -8,7 +8,7 @@ # Elastic agent is not managed by salt. Because of this we must store this base information in a # script that accompanies the soup system. Since so-common is one of those special soup files, # and since this same logic is required during installation, it's included in this file. 
-ELASTIC_AGENT_TARBALL_VERSION="8.8.2" +ELASTIC_AGENT_TARBALL_VERSION="8.10.3" ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5" ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" From 24329e3731564bacdeeb7806318c6d752602311e Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 10:34:38 -0400 Subject: [PATCH 212/417] Update config_saved_objects.ndjson --- salt/kibana/files/config_saved_objects.ndjson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kibana/files/config_saved_objects.ndjson b/salt/kibana/files/config_saved_objects.ndjson index a2dedd324..c3742af6d 100644 --- a/salt/kibana/files/config_saved_objects.ndjson +++ b/salt/kibana/files/config_saved_objects.ndjson @@ -1 +1 @@ -{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.8.2","id": "8.8.2","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} +{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.3","id": "8.10.3","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} From 8db6fef92dd6e998bafcacfdaad83b2948ce63a7 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 10:35:36 -0400 Subject: [PATCH 213/417] Elastic 
8.10.3 --- salt/kibana/tools/sbin_jinja/so-kibana-config-load | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kibana/tools/sbin_jinja/so-kibana-config-load b/salt/kibana/tools/sbin_jinja/so-kibana-config-load index 159a69e68..88195f32b 100644 --- a/salt/kibana/tools/sbin_jinja/so-kibana-config-load +++ b/salt/kibana/tools/sbin_jinja/so-kibana-config-load @@ -63,7 +63,7 @@ update() { IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))' for i in "${LINES[@]}"; do - RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.8.2" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ") + RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.3" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ") echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi done From adcb7840bd03b36553a6b8c1a28bf1912b731eeb Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 10:38:20 -0400 Subject: [PATCH 214/417] Elastic 8.10.3 --- .../integrations/elastic-defend/elastic-defend-endpoints.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json b/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json index 8ab4f748e..de35f803b 100644 --- a/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json +++ b/salt/elasticfleet/files/integrations/elastic-defend/elastic-defend-endpoints.json @@ -5,7 +5,7 @@ "package": { "name": "endpoint", "title": "Elastic Defend", - "version": "8.8.0" + "version": "8.10.2" }, "enabled": true, "policy_id": "endpoints-initial", From 99054a2687c5b20795d48f26d5a823db92aa03ba Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 10:47:26 -0400 Subject: [PATCH 215/417] Elastic 
8.10.4 --- salt/common/tools/sbin/so-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index a2f11555a..530cdeb60 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -8,7 +8,7 @@ # Elastic agent is not managed by salt. Because of this we must store this base information in a # script that accompanies the soup system. Since so-common is one of those special soup files, # and since this same logic is required during installation, it's included in this file. -ELASTIC_AGENT_TARBALL_VERSION="8.10.3" +ELASTIC_AGENT_TARBALL_VERSION="8.10.4" ELASTIC_AGENT_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" ELASTIC_AGENT_MD5_URL="https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.md5" ELASTIC_AGENT_FILE="/nsm/elastic-fleet/artifacts/elastic-agent_SO-$ELASTIC_AGENT_TARBALL_VERSION.tar.gz" From 8cab242ad079237d50e7aeb9165df794dc432b4d Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 10:48:31 -0400 Subject: [PATCH 216/417] Elastic 8.10.4 --- salt/kibana/files/config_saved_objects.ndjson | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kibana/files/config_saved_objects.ndjson b/salt/kibana/files/config_saved_objects.ndjson index c3742af6d..bc503debb 100644 --- a/salt/kibana/files/config_saved_objects.ndjson +++ b/salt/kibana/files/config_saved_objects.ndjson @@ -1 +1 @@ -{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.3","id": "8.10.3","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} 
+{"attributes": {"buildNum": 39457,"defaultIndex": "logs-*","defaultRoute": "/app/dashboards#/view/a8411b30-6d03-11ea-b301-3d6c35840645","discover:sampleSize": 100,"theme:darkMode": true,"timepicker:timeDefaults": "{\n \"from\": \"now-24h\",\n \"to\": \"now\"\n}"},"coreMigrationVersion": "8.10.4","id": "8.10.4","references": [],"type": "config","updated_at": "2021-10-10T10:10:10.105Z","version": "WzI5NzUsMl0="} From be2a8295247fea2bdb56af616303fbecf36b9651 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 10:49:03 -0400 Subject: [PATCH 217/417] Elastic 8.10.4 --- salt/kibana/tools/sbin_jinja/so-kibana-config-load | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/kibana/tools/sbin_jinja/so-kibana-config-load b/salt/kibana/tools/sbin_jinja/so-kibana-config-load index 88195f32b..b9df9c6d4 100644 --- a/salt/kibana/tools/sbin_jinja/so-kibana-config-load +++ b/salt/kibana/tools/sbin_jinja/so-kibana-config-load @@ -63,7 +63,7 @@ update() { IFS=$'\r\n' GLOBIGNORE='*' command eval 'LINES=($(cat $1))' for i in "${LINES[@]}"; do - RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.3" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ") + RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -X PUT "localhost:5601/api/saved_objects/config/8.10.4" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d " $i ") echo $RESPONSE; if [[ "$RESPONSE" != *"\"success\":true"* ]] && [[ "$RESPONSE" != *"updated_at"* ]] ; then RETURN_CODE=1;fi done From 06e731c762b7ec877729cadec5c3bc7aa855b5d9 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 17 Oct 2023 13:33:12 -0400 Subject: [PATCH 218/417] Update VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 7d52aac7f..8ea99f559 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.0-foxtrot +2.4.30 From 496b97d706365da98f38ad4965a5bcb69be2db48 Mon Sep 17 00:00:00 2001 
From: m0duspwnens Date: Tue, 17 Oct 2023 15:42:42 -0400 Subject: [PATCH 219/417] handle the mine file not being present before checking the size --- salt/salt/engines/master/checkmine.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/salt/salt/engines/master/checkmine.py b/salt/salt/engines/master/checkmine.py index 09e624ba3..0cfb4032d 100644 --- a/salt/salt/engines/master/checkmine.py +++ b/salt/salt/engines/master/checkmine.py @@ -26,8 +26,16 @@ def start(interval=60): for minion in manage_alived: mine_path = os.path.join(cachedir, 'minions', minion, 'mine.p') - mine_size = os.path.getsize(mine_path) - log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size)) + # it is possible that a minion is alive, but there isn't a mine.p file + try: + mine_size = os.path.getsize(mine_path) + log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size)) + except FileNotFoundError: + log.warning('checkmine engine: minion: %s %s does not exist' % (minion, mine_path)) + mine_flush(minion) + mine_update(minion) + continue + # For some reason the mine file can be corrupt and only be 1 byte in size if mine_size == 1: log.error('checkmine engine: found %s to be 1 byte' % mine_path) From d9862aefcfa84f10dedf2b1e95eaf685b5f6f517 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 17 Oct 2023 17:09:52 -0400 Subject: [PATCH 220/417] handle mine.p not being present. 
only check if mine_ip exists, dont compare to alived ip --- salt/salt/engines/master/checkmine.py | 35 +++++++++++++++------------ 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/salt/salt/engines/master/checkmine.py b/salt/salt/engines/master/checkmine.py index 0cfb4032d..c62c1f058 100644 --- a/salt/salt/engines/master/checkmine.py +++ b/salt/salt/engines/master/checkmine.py @@ -9,6 +9,10 @@ log = logging.getLogger(__name__) local = salt.client.LocalClient() def start(interval=60): + def mine_delete(minion, func): + log.warning('checkmine engine: deleting mine function %s for %s' % (func, minion)) + local.cmd(minion, 'mine.delete', [func]) + def mine_flush(minion): log.warning('checkmine engine: flushing mine cache for %s' % minion) local.cmd(minion, 'mine.flush') @@ -21,36 +25,35 @@ def start(interval=60): cachedir = __opts__['cachedir'] while True: log.debug('checkmine engine: checking which minions are alive') - manage_alived = __salt__['saltutil.runner']('manage.alived', show_ip=True) + manage_alived = __salt__['saltutil.runner']('manage.alived', show_ip=False) log.debug('checkmine engine: alive minions: %s' % ' , '.join(manage_alived)) for minion in manage_alived: mine_path = os.path.join(cachedir, 'minions', minion, 'mine.p') - # it is possible that a minion is alive, but there isn't a mine.p file + # it is possible that a minion is alive, but hasn't created a mine file yet try: mine_size = os.path.getsize(mine_path) log.debug('checkmine engine: minion: %s mine_size: %i' % (minion, mine_size)) + # For some reason the mine file can be corrupt and only be 1 byte in size + if mine_size == 1: + log.error('checkmine engine: found %s to be 1 byte' % mine_path) + mine_flush(minion) + mine_update(minion) + continue except FileNotFoundError: log.warning('checkmine engine: minion: %s %s does not exist' % (minion, mine_path)) mine_flush(minion) mine_update(minion) continue - # For some reason the mine file can be corrupt and only be 1 byte in size - if 
mine_size == 1: - log.error('checkmine engine: found %s to be 1 byte' % mine_path) - mine_flush(minion) - mine_update(minion) # Update the mine if the ip in the mine doesn't match returned from manage.alived - else: - network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs') + network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs') + try: mine_ip = network_ip_addrs[minion][0] - log.debug('checkmine engine: minion: %s mine_ip: %s' % (minion, mine_ip)) - manage_alived_ip = manage_alived[minion] - log.debug('checkmine engine: minion: %s managed_alived_ip: %s' % (minion, manage_alived_ip)) - if mine_ip != manage_alived_ip: - log.error('checkmine engine: found minion %s has manage_alived_ip %s but a mine_ip of %s' % (minion, manage_alived_ip, mine_ip)) - mine_flush(minion) - mine_update(minion) + log.debug('checkmine engine: found minion %s has mine_ip: %s' % (minion, mine_ip)) + except IndexError: + log.error('checkmine engine: found minion %s does\'t have a mine_ip' % (minion)) + mine_delete(minion, 'network.ip_addrs') + mine_update(minion) sleep(interval) From 928fb23e963c425098a4105ef91c965a9a6f814e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 17 Oct 2023 17:28:28 -0400 Subject: [PATCH 221/417] only add node to pillar if returned ip from mine --- pillar/logstash/nodes.sls | 22 +++++++++++++--------- pillar/node_data/ips.sls | 24 ++++++++++++++---------- pillar/nodegroups/init.sls | 0 3 files changed, 27 insertions(+), 19 deletions(-) create mode 100644 pillar/nodegroups/init.sls diff --git a/pillar/logstash/nodes.sls b/pillar/logstash/nodes.sls index 8d3bdab65..228122b03 100644 --- a/pillar/logstash/nodes.sls +++ b/pillar/logstash/nodes.sls @@ -7,19 +7,23 @@ tgt_type='compound') | dictsort() %} -{% set hostname = cached_grains[minionid]['host'] %} -{% set node_type = minionid.split('_')[1] %} -{% if node_type not in node_types.keys() %} -{% do node_types.update({node_type: 
{hostname: ip[0]}}) %} -{% else %} -{% if hostname not in node_types[node_type] %} -{% do node_types[node_type].update({hostname: ip[0]}) %} +# only add a node to the pillar if it returned an ip from the mine +{% if ip[0] | length > 0%} +{% set hostname = cached_grains[minionid]['host'] %} +{% set node_type = minionid.split('_')[1] %} +{% if node_type not in node_types.keys() %} +{% do node_types.update({node_type: {hostname: ip[0]}}) %} {% else %} -{% do node_types[node_type][hostname].update(ip[0]) %} +{% if hostname not in node_types[node_type] %} +{% do node_types[node_type].update({hostname: ip[0]}) %} +{% else %} +{% do node_types[node_type][hostname].update(ip[0]) %} +{% endif %} {% endif %} -{% endif %} +{% fi %} {% endfor %} + logstash: nodes: {% for node_type, values in node_types.items() %} diff --git a/pillar/node_data/ips.sls b/pillar/node_data/ips.sls index 59c598879..5801d36f1 100644 --- a/pillar/node_data/ips.sls +++ b/pillar/node_data/ips.sls @@ -4,18 +4,22 @@ {% set hostname = minionid.split('_')[0] %} {% set node_type = minionid.split('_')[1] %} {% set is_alive = False %} -{% if minionid in manage_alived.keys() %} -{% if ip[0] == manage_alived[minionid] %} -{% set is_alive = True %} + +# only add a node to the pillar if it returned an ip from the mine +{% if ip | length > 0%} +{% if minionid in manage_alived.keys() %} +{% if ip[0] == manage_alived[minionid] %} +{% set is_alive = True %} +{% endif %} {% endif %} -{% endif %} -{% if node_type not in node_types.keys() %} -{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %} -{% else %} -{% if hostname not in node_types[node_type] %} -{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %} +{% if node_type not in node_types.keys() %} +{% do node_types.update({node_type: {hostname: {'ip':ip[0], 'alive':is_alive }}}) %} {% else %} -{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %} +{% if hostname not in 
node_types[node_type] %} +{% do node_types[node_type].update({hostname: {'ip':ip[0], 'alive':is_alive}}) %} +{% else %} +{% do node_types[node_type][hostname].update({'ip':ip[0], 'alive':is_alive}) %} +{% endif %} {% endif %} {% endif %} {% endfor %} diff --git a/pillar/nodegroups/init.sls b/pillar/nodegroups/init.sls new file mode 100644 index 000000000..e69de29bb From fb9a0ab8b627e51dc1c6898bd7b3f4f05ca2b3aa Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 17 Oct 2023 17:33:53 -0400 Subject: [PATCH 222/417] endif not fi in jinja --- pillar/logstash/nodes.sls | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pillar/logstash/nodes.sls b/pillar/logstash/nodes.sls index 228122b03..a77978821 100644 --- a/pillar/logstash/nodes.sls +++ b/pillar/logstash/nodes.sls @@ -8,7 +8,7 @@ %} # only add a node to the pillar if it returned an ip from the mine -{% if ip[0] | length > 0%} +{% if ip | length > 0%} {% set hostname = cached_grains[minionid]['host'] %} {% set node_type = minionid.split('_')[1] %} {% if node_type not in node_types.keys() %} @@ -20,7 +20,7 @@ {% do node_types[node_type][hostname].update(ip[0]) %} {% endif %} {% endif %} -{% fi %} +{% endif %} {% endfor %} From 34717fb65e75281a17b1809df257d88a2f3e2bf5 Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 18 Oct 2023 13:44:09 -0400 Subject: [PATCH 223/417] Add note regarding DNS resolver --- .../files/analyzers/malwarehashregistry/README.md | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 salt/sensoroni/files/analyzers/malwarehashregistry/README.md diff --git a/salt/sensoroni/files/analyzers/malwarehashregistry/README.md b/salt/sensoroni/files/analyzers/malwarehashregistry/README.md new file mode 100644 index 000000000..8c50a3124 --- /dev/null +++ b/salt/sensoroni/files/analyzers/malwarehashregistry/README.md @@ -0,0 +1,10 @@ +# Malware Hash Registry + +## Description +Search Team Cymru's Malware Hash Registry for a file hash. 
+ +## Configuration Requirements + +None. + +**NOTE:** If you try to run the Malware Hash Registry analyzer but it results in a "Name or service not known" error, then it may be a DNS issue. Folks using 8.8.4.4 or 8.8.8.8 as their DNS resolver have reported this issue. A potential workaround is to switch to another DNS resolver like 1.1.1.1. From 138aa9c5542e3fb3a5a85c1a1edc66d6d13ae8ff Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 13:54:14 -0400 Subject: [PATCH 224/417] update the mine with the ca when it is created or changed --- salt/ca/init.sls | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/ca/init.sls b/salt/ca/init.sls index 0eaf86b3c..2f15872ea 100644 --- a/salt/ca/init.sls +++ b/salt/ca/init.sls @@ -50,6 +50,12 @@ pki_public_ca_crt: attempts: 5 interval: 30 +mine_update_ca_crt: + - module.run: + - mine.update: [] + - onchanges: + - file: pki_public_ca_crt + cakeyperms: file.managed: - replace: False From 8e68f9631618c58f9f95d507ebfcc888b143532f Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 13:59:15 -0400 Subject: [PATCH 225/417] check that the manager has a ca in the mine and that it is valid --- salt/salt/engines/master/checkmine.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/salt/salt/engines/master/checkmine.py b/salt/salt/engines/master/checkmine.py index c62c1f058..e72d2fbe6 100644 --- a/salt/salt/engines/master/checkmine.py +++ b/salt/salt/engines/master/checkmine.py @@ -46,6 +46,25 @@ def start(interval=60): mine_update(minion) continue + # if a manager check that the ca in in the mine and it is correct + if minion.split('_')[-1] in ['manager', 'managersearch', 'eval', 'standalone', 'import']: + x509 = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='x509.get_pem_entries') + try: + ca_crt = x509[minion]['/etc/pki/ca.crt'] + log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt)) + # since the cert is defined, make sure it is valid + if 
not __salt__['x509.verify_private_key'](private_key='/etc/pki/ca.key', public_key='/etc/pki/ca.crt'): + log.error('checkmine engine: found minion %s does\'t have a valid ca_crt in the mine' % (minion)) + log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt)) + mine_delete(minion, 'x509.get_pem_entries') + mine_update(minion) + else: + log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion)) + except IndexError: + log.error('checkmine engine: found minion %s does\'t have a ca_crt in the mine' % (minion)) + mine_delete(minion, 'x509.get_pem_entries') + mine_update(minion) + # Update the mine if the ip in the mine doesn't match returned from manage.alived network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs') try: From c3cde61202507f177f30212fa75d8b258e6da671 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 15:01:26 -0400 Subject: [PATCH 226/417] docker service watches and requires the intca --- salt/docker/init.sls | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/salt/docker/init.sls b/salt/docker/init.sls index 45ba4a1ac..769c58af8 100644 --- a/salt/docker/init.sls +++ b/salt/docker/init.sls @@ -6,6 +6,9 @@ {% from 'docker/docker.map.jinja' import DOCKER %} {% from 'vars/globals.map.jinja' import GLOBALS %} +# include ssl since docker service requires the intca +include: + - ssl dockergroup: group.present: @@ -86,6 +89,11 @@ docker_running: - enable: True - watch: - file: docker_daemon + - x509: trusttheca + - require: + - file: docker_daemon + - x509: trusttheca + # Reserve OS ports for Docker proxy in case boot settings are not already applied/present # 57314 = Strelka, 47760-47860 = Zeek From 1999db0bb3d593df97a1267e398813d4fabcd084 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 15:02:22 -0400 Subject: [PATCH 227/417] apply ca state early in setup --- setup/so-setup | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git 
a/setup/so-setup b/setup/so-setup index 4db24aa1a..052111591 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -714,12 +714,19 @@ if ! [[ -f $install_opt_file ]]; then logCmd "salt-call state.apply common.packages" logCmd "salt-call state.apply common" + # apply the ca state to create the ca and put it in the mine early in the install + # this is done before the salt.master state puts the checkmine engine in place and starts + # checking for a valid ca in the mine for the manager + # the minion ip will already be in the mine from configure_minion function in so-functions + logCmd "salt-call state.apply ca" + # this will apply the salt.minion state first since salt.master includes salt.minion logCmd "salt-call state.apply salt.master" - # wait here until we get a response from the salt-master since it may have just restarted # exit setup after 5-6 minutes of trying check_salt_master_status || fail "Can't access salt master or it is not ready" + # this will also call the ssl state since docker requires the intca + # the salt-minion service will need to be up on the manager to sign requests logCmd "salt-call state.apply docker" firewall_generate_templates set_initial_firewall_policy From 2206cdb0fad836de3ab5a0c9197416e1ed9eb537 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 15:04:39 -0400 Subject: [PATCH 228/417] change soup comment --- pillar/nodegroups/init.sls | 0 salt/manager/tools/sbin/soup | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 pillar/nodegroups/init.sls diff --git a/pillar/nodegroups/init.sls b/pillar/nodegroups/init.sls deleted file mode 100644 index e69de29bb..000000000 diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 84501bad5..fc07765b8 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -578,7 +578,7 @@ update_centos_repo() { } update_salt_mine() { - echo "Populating the mine with network.ip_addrs pillar.host.mainint for each host." 
+ echo "Populating the mine with mine_functions for each host." set +e salt \* mine.update -b 50 set -e From 1c1b23c328af4d2788992c1cfc87451d97e6648b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 15:07:18 -0400 Subject: [PATCH 229/417] fix mine update for ca --- salt/ca/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ca/init.sls b/salt/ca/init.sls index 2f15872ea..477b51a5d 100644 --- a/salt/ca/init.sls +++ b/salt/ca/init.sls @@ -51,7 +51,7 @@ pki_public_ca_crt: interval: 30 mine_update_ca_crt: - - module.run: + module.run: - mine.update: [] - onchanges: - file: pki_public_ca_crt From e58c1e189c63bd6aab9d6b7a32b06ee6cc3a075b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 15:10:17 -0400 Subject: [PATCH 230/417] use x509 instead of file for onchanges --- salt/ca/init.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/ca/init.sls b/salt/ca/init.sls index 477b51a5d..895e8235a 100644 --- a/salt/ca/init.sls +++ b/salt/ca/init.sls @@ -54,7 +54,7 @@ mine_update_ca_crt: module.run: - mine.update: [] - onchanges: - - file: pki_public_ca_crt + - x509: pki_public_ca_crt cakeyperms: file.managed: From dd28dc6ddd26a2ee913cf668dca4f8c9e0754007 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Wed, 18 Oct 2023 15:30:32 -0400 Subject: [PATCH 231/417] Add back plugin-tds/ plugin-profinet. 
Using patched versions for Zeek 6 Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/zeek/defaults.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/zeek/defaults.yaml b/salt/zeek/defaults.yaml index ce22c1aef..2621c2738 100644 --- a/salt/zeek/defaults.yaml +++ b/salt/zeek/defaults.yaml @@ -66,6 +66,8 @@ zeek: - icsnpp-opcua-binary - icsnpp-bsap - icsnpp-s7comm + - zeek-plugin-tds + - zeek-plugin-profinet - zeek-spicy-wireguard - zeek-spicy-stun load-sigs: From ac28e1b967f35c1f3f21424b14036367b627f48c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 15:53:12 -0400 Subject: [PATCH 232/417] verify crt and key differently in checkmine --- salt/salt/engines/master/checkmine.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/salt/engines/master/checkmine.py b/salt/salt/engines/master/checkmine.py index e72d2fbe6..1440fb72f 100644 --- a/salt/salt/engines/master/checkmine.py +++ b/salt/salt/engines/master/checkmine.py @@ -53,17 +53,20 @@ def start(interval=60): ca_crt = x509[minion]['/etc/pki/ca.crt'] log.debug('checkmine engine: found minion %s has ca_crt: %s' % (minion, ca_crt)) # since the cert is defined, make sure it is valid - if not __salt__['x509.verify_private_key'](private_key='/etc/pki/ca.key', public_key='/etc/pki/ca.crt'): + import salt.modules.x509_v2 as x509_v2 + if not x509_v2.verify_private_key('/etc/pki/ca.key', '/etc/pki/ca.crt'): log.error('checkmine engine: found minion %s does\'t have a valid ca_crt in the mine' % (minion)) log.error('checkmine engine: %s: ca_crt: %s' % (minion, ca_crt)) mine_delete(minion, 'x509.get_pem_entries') mine_update(minion) + continue else: log.debug('checkmine engine: found minion %s has a valid ca_crt in the mine' % (minion)) except IndexError: log.error('checkmine engine: found minion %s does\'t have a ca_crt in the mine' % (minion)) mine_delete(minion, 'x509.get_pem_entries') mine_update(minion) + continue # Update the mine if the ip in the 
mine doesn't match returned from manage.alived network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs') From 767a54c91b2cc698bc03c54c269b1eda3c15640f Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 18 Oct 2023 20:07:26 +0000 Subject: [PATCH 233/417] Add pkgs --- salt/elasticfleet/defaults.yaml | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index a4862623d..b5d1ee166 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -30,18 +30,25 @@ elasticfleet: packages: - apache - auditd + - auth0 - aws - azure - barracuda + - carbonblack_cloud + - carbonblack_edr - cisco_asa + - cisco_duo + - cisco_meraki + - cisco_umbrella - cloudflare - crowdstrike - darktrace - elasticsearch - endpoint - f5_bigip - - fleet_server - fim + - fireeye + - fleet_server - fortinet - fortinet_fortigate - gcp @@ -57,18 +64,24 @@ elasticfleet: - m365_defender - microsoft_defender_endpoint - microsoft_dhcp + - mimecast - netflow - o365 - okta - osquery_manager - panw - pfsense + - pulse_connect_secure - redis - sentinel_one + - snyk - sonicwall_firewall + - sophos + - sophos_central - symantec_endpoint - system - tcp + - tenable_sc - ti_abusech - ti_misp - ti_otx From a52ee063e597b010df8996fd02423598570fd9a6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 16:35:33 -0400 Subject: [PATCH 234/417] use generate_ca and generate_ssl functions and move them up --- setup/so-setup | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/setup/so-setup b/setup/so-setup index 052111591..73ef29f4b 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -718,7 +718,7 @@ if ! 
[[ -f $install_opt_file ]]; then # this is done before the salt.master state puts the checkmine engine in place and starts # checking for a valid ca in the mine for the manager # the minion ip will already be in the mine from configure_minion function in so-functions - logCmd "salt-call state.apply ca" + generate_ca # this will apply the salt.minion state first since salt.master includes salt.minion logCmd "salt-call state.apply salt.master" # wait here until we get a response from the salt-master since it may have just restarted @@ -727,6 +727,7 @@ if ! [[ -f $install_opt_file ]]; then # this will also call the ssl state since docker requires the intca # the salt-minion service will need to be up on the manager to sign requests + generate_ssl logCmd "salt-call state.apply docker" firewall_generate_templates set_initial_firewall_policy @@ -734,8 +735,6 @@ if ! [[ -f $install_opt_file ]]; then title "Downloading Elastic Agent Artifacts" download_elastic_agent_artifacts - generate_ca - generate_ssl logCmd "salt-call state.apply -l info firewall" # create these so the registry state can add so-registry to /opt/so/conf/so-status/so-status.conf From 28b7a24cc1b1aeedc404a61fc4fe66b50613c49c Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 18 Oct 2023 20:36:04 +0000 Subject: [PATCH 235/417] Add templates for integrations --- salt/elasticsearch/defaults.yaml | 522 +++++++++++++++++++++++++++++++ 1 file changed, 522 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 91e5191f6..1296ef549 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -4398,3 +4398,525 @@ elasticsearch: min_age: 365d actions: delete: {} + so-logs-auth0_x_logs: + index_sorting: False + index_template: + index_patterns: + - "logs-auth0.logs-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-auth0.logs@package" + - "logs-auth0.logs@custom" + - "so-fleet_globals-1" + - 
"so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-carbonblack_edr_x_log: + index_sorting: False + index_template: + index_patterns: + - "logs-carbonblack_edr.log-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-carbonblack_edr.log@package" + - "logs-carbonblack_edr.log@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-cisco_duo_x_admin: + index_sorting: False + index_template: + index_patterns: + - "logs-cisco_duo.admin-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-cisco_duo.admin@package" + - "logs-cisco_duo.admin@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-cisco_duo_x_auth: + index_sorting: False + index_template: + index_patterns: + - "logs-cisco_duo.auth-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-cisco_duo.auth@package" + - "logs-cisco_duo.auth@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-cisco_duo_x_offline_enrollment: + index_sorting: False + index_template: + index_patterns: + - "logs-cisco_duo.offline_enrollment-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-cisco_duo.offline_enrollment@package" + - "logs-cisco_duo.offline_enrollment@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-cisco_duo_x_summary: + index_sorting: False + index_template: + index_patterns: + - "logs-cisco_duo.summary-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - 
"logs-cisco_duo.summary@package" + - "logs-cisco_duo.summary@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-cisco_duo_x_telephony: + index_sorting: False + index_template: + index_patterns: + - "logs-cisco_duo.telephony-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-cisco_duo.telephony@package" + - "logs-cisco_duo.telephony@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-cisco_meraki_x_events: + index_sorting: False + index_template: + index_patterns: + - "logs-cisco_meraki.events-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-cisco_meraki.events@package" + - "logs-cisco_meraki.events@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-cisco_meraki_x_log: + index_sorting: False + index_template: + index_patterns: + - "logs-cisco_meraki.log-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-cisco_meraki.log@package" + - "logs-cisco_meraki.log@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-cisco_umbrella_x_log: + index_sorting: False + index_template: + index_patterns: + - "logs-cisco_umbrella.log-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-cisco_umbrella.log@package" + - "logs-cisco_umbrella.log@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-fireeye_x_nx: + index_sorting: False + index_template: + index_patterns: + - "logs-fireeye.nx-*" + template: + settings: + index: + 
number_of_replicas: 0 + composed_of: + - "logs-fireeye.nx@package" + - "logs-fireeye.nx@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-mimecast_x_audit_events: + index_sorting: False + index_template: + index_patterns: + - "logs-mimecast.audit_events-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-mimecast.audit_events@package" + - "logs-mimecast.audit_events@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-mimecast_x_dlp_logs: + index_sorting: False + index_template: + index_patterns: + - "logs-mimecast.dlp_logs-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-mimecast.dlp_logs@package" + - "logs-mimecast.dlp_logs@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-mimecast_x_siem_logs: + index_sorting: False + index_template: + index_patterns: + - "logs-mimecast.siem_logs-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-mimecast.siem_logs@package" + - "logs-mimecast.siem_logs@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-mimecast_x_threat_intel_malware_customer: + index_sorting: False + index_template: + index_patterns: + - "logs-mimecast.threat_intel_malware_customer-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-mimecast.threat_intel_malware_customer@package" + - "logs-mimecast.threat_intel_malware_customer@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + 
so-logs-mimecast_x_threat_intel_malware_grid: + index_sorting: False + index_template: + index_patterns: + - "logs-mimecast.threat_intel_malware_grid-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-mimecast.threat_intel_malware_grid@package" + - "logs-mimecast.threat_intel_malware_grid@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-mimecast_x_ttp_ap_logs: + index_sorting: False + index_template: + index_patterns: + - "logs-mimecast.ttp_ap_logs-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-mimecast.ttp_ap_logs@package" + - "logs-mimecast.ttp_ap_logs@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-mimecast_x_ttp_ip_logs: + index_sorting: False + index_template: + index_patterns: + - "logs-mimecast.ttp_ip_logs-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-mimecast.ttp_ip_logs@package" + - "logs-mimecast.ttp_ip_logs@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-mimecast_x_ttp_url_logs: + index_sorting: False + index_template: + index_patterns: + - "logs-mimecast.ttp_url_logs-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-mimecast.ttp_url_logs@package" + - "logs-mimecast.ttp_url_logs@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-pulse_connect_secure_x_log: + index_sorting: False + index_template: + index_patterns: + - "logs-pulse_connect_secure.log-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-pulse_connect_secure.log@package" + - 
"logs-pulse_connect_secure.log@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-snyk_x_audit: + index_sorting: False + index_template: + index_patterns: + - "logs-snyk.audit-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-snyk.audit@package" + - "logs-snyk.audit@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-snyk_x_vulnerabilities: + index_sorting: False + index_template: + index_patterns: + - "logs-snyk.vulnerabilities-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-snyk.vulnerabilities@package" + - "logs-snyk.vulnerabilities@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-sophos_x_utm: + index_sorting: False + index_template: + index_patterns: + - "logs-sophos.utm-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-sophos.utm@package" + - "logs-sophos.utm@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-sophos_x_xg: + index_sorting: False + index_template: + index_patterns: + - "logs-sophos.xg-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-sophos.xg@package" + - "logs-sophos.xg@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-sophos_central_x_alert: + index_sorting: False + index_template: + index_patterns: + - "logs-sophos_central.alert-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-sophos_central.alert@package" + - 
"logs-sophos_central.alert@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-sophos_central_x_event: + index_sorting: False + index_template: + index_patterns: + - "logs-sophos_central.event-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-sophos_central.event@package" + - "logs-sophos_central.event@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-tenable_sc_x_asset: + index_sorting: False + index_template: + index_patterns: + - "logs-tenable_sc.asset-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-tenable_sc.asset@package" + - "logs-tenable_sc.asset@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-tenable_sc_x_plugin: + index_sorting: False + index_template: + index_patterns: + - "logs-tenable_sc.plugin-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-tenable_sc.plugin@package" + - "logs-tenable_sc.plugin@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + so-logs-tenable_sc_x_vulnerability: + index_sorting: False + index_template: + index_patterns: + - "logs-tenable_sc.vulnerability-*" + template: + settings: + index: + number_of_replicas: 0 + composed_of: + - "logs-tenable_sc.vulnerability@package" + - "logs-tenable_sc.vulnerability@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false From c135f886a906cfd192b9625e9cba8ea2262fb644 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 18 Oct 2023 20:41:34 +0000 Subject: [PATCH 236/417] Remove Carbon 
Black Cloud integration --- salt/elasticfleet/defaults.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index b5d1ee166..a17957e7c 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -34,7 +34,6 @@ elasticfleet: - aws - azure - barracuda - - carbonblack_cloud - carbonblack_edr - cisco_asa - cisco_duo From 9eb682bc40fca2463275e43e2db37a7a52978216 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 18:37:35 -0400 Subject: [PATCH 237/417] generate_ca after salt-master and salt-minion states run --- salt/common/tools/sbin/so-common | 2 +- setup/so-functions | 10 ++++++++++ setup/so-setup | 9 +++------ 3 files changed, 14 insertions(+), 7 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index c19d51a42..be001f0d7 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -153,7 +153,7 @@ check_salt_master_status() { } check_salt_minion_status() { - local timeout=$1 + local timeout="${1:-5}" echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 local status=$? diff --git a/setup/so-functions b/setup/so-functions index 68fd01550..7ead07ca7 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2495,6 +2495,16 @@ wait_for_file() { wait_for_salt_minion() { retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup + local attempt=0 + # each attempts would take about 15 seconds + local maxAttempts=20 + until check_salt_minion_status; do + attempt=$((attempt+1)) + if [[ $attempt -gt $maxAttempts ]]; then + fail_setup + fi + sleep 10 + done } verify_setup() { diff --git a/setup/so-setup b/setup/so-setup index 73ef29f4b..543ac0156 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -714,17 +714,14 @@ if ! 
[[ -f $install_opt_file ]]; then logCmd "salt-call state.apply common.packages" logCmd "salt-call state.apply common" - # apply the ca state to create the ca and put it in the mine early in the install - # this is done before the salt.master state puts the checkmine engine in place and starts - # checking for a valid ca in the mine for the manager - # the minion ip will already be in the mine from configure_minion function in so-functions - generate_ca # this will apply the salt.minion state first since salt.master includes salt.minion logCmd "salt-call state.apply salt.master" # wait here until we get a response from the salt-master since it may have just restarted # exit setup after 5-6 minutes of trying check_salt_master_status || fail "Can't access salt master or it is not ready" - + # apply the ca state to create the ca and put it in the mine early in the install + # the minion ip will already be in the mine from configure_minion function in so-functions + generate_ca # this will also call the ssl state since docker requires the intca # the salt-minion service will need to be up on the manager to sign requests generate_ssl From ff18b1f074c8448b9503e04acf7b6df41240fde5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 18:45:14 -0400 Subject: [PATCH 238/417] remove redirect --- salt/common/tools/sbin/so-common | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index be001f0d7..1a5e5df6c 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -154,13 +154,13 @@ check_salt_master_status() { check_salt_minion_status() { local timeout="${1:-5}" - echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 + echo "Checking if the salt minion will respond to jobs" salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 local status=$? 
if [ $status -gt 0 ]; then - echo " Minion did not respond" >> "$setup_log" 2>&1 + echo " Minion did not respond" else - echo " Received job response from salt minion" >> "$setup_log" 2>&1 + echo " Received job response from salt minion" fi return $status From f30a652e19ea9c363f5bf4bba682153c2f56fc5b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 19:31:45 -0400 Subject: [PATCH 239/417] add back redirects --- salt/common/tools/sbin/so-common | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 1a5e5df6c..dce86362f 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -154,13 +154,13 @@ check_salt_master_status() { check_salt_minion_status() { local timeout="${1:-5}" - echo "Checking if the salt minion will respond to jobs" + echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 local status=$? 
if [ $status -gt 0 ]; then - echo " Minion did not respond" + echo " Minion did not respond" >> "$setup_log" 2>&1 else - echo " Received job response from salt minion" + echo " Received job response from salt minion" >> "$setup_log" 2>&1 fi return $status From b2bb92d41368f93db9122ffcab5ae761e33f6669 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 18 Oct 2023 19:38:19 -0400 Subject: [PATCH 240/417] remove extra space --- salt/common/tools/sbin/so-common | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index dce86362f..be001f0d7 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -154,7 +154,7 @@ check_salt_master_status() { check_salt_minion_status() { local timeout="${1:-5}" - echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 + echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 local status=$? if [ $status -gt 0 ]; then From c5610edd835ff7787acf778c765c60cf3789aa91 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 19 Oct 2023 11:12:20 -0400 Subject: [PATCH 241/417] handle salt for r9 and c9 --- salt/manager/tools/sbin/soup | 16 ++++++++++++---- salt/salt/scripts/bootstrap-salt.sh | 7 ------- setup/so-functions | 3 ++- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 5768500c2..28ac29497 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -620,6 +620,7 @@ upgrade_check_salt() { if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then echo "You are already running the correct version of Salt for Security Onion." else + echo "Salt needs to be upgraded to $NEWSALTVERSION." UPGRADESALT=1 fi } @@ -629,16 +630,23 @@ upgrade_salt() { echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." 
echo "" # If Oracle Linux - if [[ $OS == 'oel' ]]; then + if [[ $OS == 'oel' || $OS == 'centos' || $OS == 'rocky' ||]]; then echo "Removing yum versionlock for Salt." echo "" yum versionlock delete "salt-*" echo "Updating Salt packages." echo "" set +e - run_check_net_err \ - "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ - "Could not update salt, please check $SOUP_LOG for details." + if [[ $OS == 'oel' ]]; then + run_check_net_err \ + "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ + "Could not update salt, please check $SOUP_LOG for details." + # if rocky or centos we want to run without -r to allow the bootstrap script to manage repos + else + run_check_net_err \ + "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \ + "Could not update salt, please check $SOUP_LOG for details." + fi set -e echo "Applying yum versionlock for Salt." echo "" diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index 156489f4e..a016524e6 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -617,13 +617,6 @@ if [ "$(echo "$ITYPE" | grep stable)" = "" ]; then exit 1 fi -# We want to require this script to only run with -r. We dont want to accidentally try to install from another repo -# and we dont want to put salt.repo in /etc/yum.repos.d/ -if [ "$_DISABLE_REPOS" -eq $BS_FALSE ];then - echoerror "This script has been modified to required the usage of the -r flag which disables this script from using its own repos..." 
- exit 1 -fi - # If doing a git install, check what branch/tag/sha will be checked out if [ "$ITYPE" = "git" ]; then if [ "$#" -eq 0 ];then diff --git a/setup/so-functions b/setup/so-functions index 7ead07ca7..65f21fa20 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1972,6 +1972,7 @@ securityonion_repo() { } repo_sync_local() { + SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //') info "Repo Sync" if [[ $is_supported ]]; then # Sync the repo from the the SO repo locally. @@ -2021,7 +2022,7 @@ repo_sync_local() { curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo - curl -fsSL https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/3006.1.repo | tee /etc/yum.repos.d/salt.repo + curl -fsSL "https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/$SALTVERSION.repo" | tee /etc/yum.repos.d/salt.repo dnf repolist curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install else From 13a5c8baa7a08ecbed21a5d3562c8e0fad540872 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 19 Oct 2023 11:19:51 -0400 Subject: [PATCH 242/417] remove extra || --- salt/manager/tools/sbin/soup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 28ac29497..5c7033a2f 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -630,7 +630,7 @@ upgrade_salt() { echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." 
echo "" # If Oracle Linux - if [[ $OS == 'oel' || $OS == 'centos' || $OS == 'rocky' ||]]; then + if [[ $OS == 'oel' || $OS == 'centos' || $OS == 'rocky' ]]; then echo "Removing yum versionlock for Salt." echo "" yum versionlock delete "salt-*" From e3830fa286f9a0b791c1e9ea7789fbc1ba424e1c Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 19 Oct 2023 13:43:03 -0400 Subject: [PATCH 243/417] all more os to set_os in so-common --- salt/common/tools/sbin/so-common | 44 +++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index fc14e9d0a..a7aace61a 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -452,19 +452,51 @@ set_os() { OS=rocky OSVER=9 is_rocky=true + is_rpm=true elif grep -q "CentOS Stream release 9" /etc/redhat-release; then OS=centos OSVER=9 is_centos=true - elif grep -q "Oracle Linux Server release 9" /etc/system-release; then - OS=oel + is_rpm=true + elif grep -q "AlmaLinux release 9" /etc/redhat-release; then + OS=alma OSVER=9 - is_oracle=true + is_alma=true + is_rpm=true + elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then + if [ -f /etc/oracle-release ]; then + OS=oracle + OSVER=9 + is_oracle=true + is_rpm=true + else + OS=rhel + OSVER=9 + is_rhel=true + is_rpm=true + fi fi cron_service_name="crond" - else - OS=ubuntu - is_ubuntu=true + elif [ -f /etc/os-release ]; then + if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then + OSVER=focal + UBVER=20.04 + OS=ubuntu + is_ubuntu=true + is_deb=true + elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then + OSVER=jammy + UBVER=22.04 + OS=ubuntu + is_ubuntu=true + is_deb=true + elif grep -q "VERSION_CODENAME=bookworm" /etc/os-release; then + OSVER=bookworm + DEBVER=12 + is_debian=true + OS=debian + is_deb=true + fi cron_service_name="cron" fi } From 84f8e1cc926f8ac495d312c7b0c3577ba378fba5 Mon Sep 17 00:00:00 2001 From: 
m0duspwnens Date: Thu, 19 Oct 2023 13:46:07 -0400 Subject: [PATCH 244/417] debian family upgrade salt without -r flag --- salt/salt/map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 1120685fb..131ff46ca 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -23,7 +23,7 @@ {% if grains.os|lower in ['Rocky', 'redhat', 'CentOS Stream'] %} {% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} {% elif grains.os_family|lower == 'debian' %} - {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %} {% endif %} {% else %} {% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %} From 90bde94371713196d473062f01b2c9a8089608d9 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 19 Oct 2023 13:46:48 -0400 Subject: [PATCH 245/417] handle debian family salt upgrade for soup --- salt/manager/tools/sbin/soup | 31 +++++++++++++++++++++++++------ 1 file changed, 25 insertions(+), 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 5c7033a2f..0ec3b92f9 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -629,19 +629,20 @@ upgrade_salt() { SALTUPGRADED=True echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "" - # If Oracle Linux - if [[ $OS == 'oel' || $OS == 'centos' || $OS == 'rocky' ]]; then + # If rhel family + if [[ $is_rpm ]]; then echo "Removing yum versionlock for Salt." echo "" yum versionlock delete "salt-*" echo "Updating Salt packages." 
echo "" set +e - if [[ $OS == 'oel' ]]; then + # if oracle run with -r to ignore repos set by bootstrap + if [[ $OS == 'oracle' ]]; then run_check_net_err \ "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ "Could not update salt, please check $SOUP_LOG for details." - # if rocky or centos we want to run without -r to allow the bootstrap script to manage repos + # if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos else run_check_net_err \ "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \ @@ -652,6 +653,24 @@ upgrade_salt() { echo "" yum versionlock add "salt-*" # Else do Ubuntu things + elif [[ $is_deb ]]; then + echo "Removing apt hold for Salt." + echo "" + apt-mark unhold "salt-common" + apt-mark unhold "salt-master" + apt-mark unhold "salt-minion" + echo "Updating Salt packages." + echo "" + set +e + run_check_net_err \ + "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \ + "Could not update salt, please check $SOUP_LOG for details." + set -e + echo "Applying apt hold for Salt." + echo "" + apt-mark hold "salt-common" + apt-mark hold "salt-master" + apt-mark hold "salt-minion" fi echo "Checking if Salt was upgraded." @@ -663,7 +682,7 @@ upgrade_salt() { echo "Once the issue is resolved, run soup again." echo "Exiting." echo "" - exit 0 + exit 1 else echo "Salt upgrade success." 
echo "" @@ -793,7 +812,7 @@ main() { if [[ $is_airgap -eq 0 ]]; then yum clean all check_os_updates - elif [[ $OS == 'oel' ]]; then + elif [[ $OS == 'oracle' ]]; then # sync remote repo down to local if not airgap repo_sync check_os_updates From 66ee074795d23617f105e2863b60a87c355c767a Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 19 Oct 2023 15:57:24 -0400 Subject: [PATCH 246/417] add wait_for_salt_minion to so-common --- salt/common/tools/sbin/so-common | 31 ++++++++++++++++++++++++++----- 1 file changed, 26 insertions(+), 5 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index a7aace61a..a2ac96ece 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -152,15 +152,18 @@ check_salt_master_status() { return 0 } +# this is only intended to be used to check the status of the minion check_salt_minion_status() { - local timeout="${1:-5}" - echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 - salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 + local minion="$1" + local timeout="${2:-5}" + local logfile="${3:-'/dev/stdout'}" + echo "Checking if the salt minion will respond to jobs" >> "$logfile" 2>&1 + salt "$minion" test.ping -t $timeout > /dev/null 2>&1 local status=$? 
if [ $status -gt 0 ]; then - echo " Minion did not respond" >> "$setup_log" 2>&1 + echo " Minion did not respond" >> "$logfile" 2>&1 else - echo " Received job response from salt minion" >> "$setup_log" 2>&1 + echo " Received job response from salt minion" >> "$logfile" 2>&1 fi return $status @@ -440,6 +443,24 @@ run_check_net_err() { fi } +wait_for_salt_minion() { + local minion="$1" + local timeout="${2:-5}" + local logfile="${3:-'/dev/stdout'}" + retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail + local attempt=0 + # each attempts would take about 15 seconds + local maxAttempts=20 + until check_salt_minion_status "$minion" "$timeout" "$logfile"; do + attempt=$((attempt+1)) + if [[ $attempt -gt $maxAttempts ]]; then + return 1 + fi + sleep 10 + done + return 0 +} + salt_minion_count() { local MINIONDIR="/opt/so/saltstack/local/pillar/minions" MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l) From 37e803917e7b764eda604dff6ef62bb96bd72bdf Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 19 Oct 2023 15:58:10 -0400 Subject: [PATCH 247/417] have soup wait_for_salt_minion() before running any highstate --- salt/manager/tools/sbin/soup | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0ec3b92f9..9e7948d9e 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -829,7 +829,8 @@ main() { echo "Hotfix applied" update_version enable_highstate - salt-call state.highstate -l info queue=True + wait_for_salt_minion "$MINIONID" "5" "$SOUP_LOG" || fail "Salt minion was not running or ready." + highstate else echo "" echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION." @@ -927,7 +928,8 @@ main() { echo "" echo "Running a highstate. This could take several minutes." 
set +e - salt-call state.highstate -l info queue=True + wait_for_salt_minion "$MINIONID" "5" "$SOUP_LOG" || fail "Salt minion was not running or ready." + highstate set -e stop_salt_master @@ -942,7 +944,8 @@ main() { set -e echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." - salt-call state.highstate -l info queue=True + wait_for_salt_minion "$MINIONID" "5" "$SOUP_LOG" || fail "Salt minion was not running or ready." + highstate postupgrade_changes [[ $is_airgap -eq 0 ]] && unmount_update From dfcbbfd157ce996af14518f09b581514fbd478b6 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 19 Oct 2023 15:58:50 -0400 Subject: [PATCH 248/417] update call to wait_for_salt_minion with new options in so-functions --- setup/so-functions | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 65f21fa20..546904d03 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1258,7 +1258,7 @@ generate_ssl() { # if the install type is a manager then we need to wait for the minion to be ready before trying # to run the ssl state since we need the minion to sign the certs if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then - wait_for_salt_minion + wait_for_salt_minion "$MINION_ID" "5" "$setup_log" || fail_setup fi info "Applying SSL state" logCmd "salt-call state.apply ssl -l info" @@ -2494,20 +2494,6 @@ wait_for_file() { return 1 } -wait_for_salt_minion() { - retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup - local attempt=0 - # each attempts would take about 15 seconds - local maxAttempts=20 - until check_salt_minion_status; do - attempt=$((attempt+1)) - if [[ $attempt -gt $maxAttempts ]]; then - fail_setup - fi - sleep 10 - done -} - verify_setup() { info "Verifying setup" set -o pipefail From 
dbfccdfff814c2cfc9d98ed0ab7efaba918c7bb4 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 19 Oct 2023 16:53:03 -0400 Subject: [PATCH 249/417] fix logging when using wait_for_minion --- salt/manager/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 9e7948d9e..48c3e543d 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -829,7 +829,7 @@ main() { echo "Hotfix applied" update_version enable_highstate - wait_for_salt_minion "$MINIONID" "5" "$SOUP_LOG" || fail "Salt minion was not running or ready." + wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG" || fail "Salt minion was not running or ready." highstate else echo "" @@ -928,7 +928,7 @@ main() { echo "" echo "Running a highstate. This could take several minutes." set +e - wait_for_salt_minion "$MINIONID" "5" "$SOUP_LOG" || fail "Salt minion was not running or ready." + wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG" || fail "Salt minion was not running or ready." highstate set -e @@ -944,7 +944,7 @@ main() { set -e echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." - wait_for_salt_minion "$MINIONID" "5" "$SOUP_LOG" || fail "Salt minion was not running or ready." + wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG" || fail "Salt minion was not running or ready." 
highstate postupgrade_changes [[ $is_airgap -eq 0 ]] && unmount_update From 2e16250c93d000c20e451ed0e0b539e6c8f84bfa Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 20 Oct 2023 10:00:39 -0400 Subject: [PATCH 250/417] handle a minion not being in the mine data return --- salt/salt/engines/master/checkmine.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/salt/engines/master/checkmine.py b/salt/salt/engines/master/checkmine.py index 1440fb72f..afeb92536 100644 --- a/salt/salt/engines/master/checkmine.py +++ b/salt/salt/engines/master/checkmine.py @@ -67,6 +67,10 @@ def start(interval=60): mine_delete(minion, 'x509.get_pem_entries') mine_update(minion) continue + except KeyError: + log.error('checkmine engine: found minion %s is not in the mine' % (minion)) + mine_flush(minion) + mine_update(minion) # Update the mine if the ip in the mine doesn't match returned from manage.alived network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs') @@ -77,5 +81,8 @@ def start(interval=60): log.error('checkmine engine: found minion %s does\'t have a mine_ip' % (minion)) mine_delete(minion, 'network.ip_addrs') mine_update(minion) - + except KeyError: + log.error('checkmine engine: found minion %s is not in the mine' % (minion)) + mine_flush(minion) + mine_update(minion) sleep(interval) From ef2b89f5bf37c304ab431aa0fbeb21b3f8a3ecd7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 20 Oct 2023 13:40:40 -0400 Subject: [PATCH 251/417] fix attempts logic --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index 7ead07ca7..1c9b0f43d 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2500,7 +2500,7 @@ wait_for_salt_minion() { local maxAttempts=20 until check_salt_minion_status; do attempt=$((attempt+1)) - if [[ $attempt -gt $maxAttempts ]]; then + if [[ $attempt -eq $maxAttempts ]]; then fail_setup fi sleep 10 From 
99662c999fc5b437dc9eba8061a4c874e1692866 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 20 Oct 2023 13:41:24 -0400 Subject: [PATCH 252/417] log operation and minion target --- salt/soc/files/bin/salt-relay.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/soc/files/bin/salt-relay.sh b/salt/soc/files/bin/salt-relay.sh index ac56ec6e3..fea81728d 100755 --- a/salt/soc/files/bin/salt-relay.sh +++ b/salt/soc/files/bin/salt-relay.sh @@ -67,10 +67,10 @@ function manage_minion() { response=$(so-minion "-o=$op" "-m=$minion_id") exit_code=$? if [[ exit_code -eq 0 ]]; then - log "Successful command execution" + log "Successful '$op' command execution on $minion_id" respond "$id" "true" else - log "Unsuccessful command execution: $response ($exit_code)" + log "Unsuccessful '$op' command execution on $minion_id: $response ($exit_code)" respond "$id" "false" fi } From 6d77b1e4c389a90ae475e2e523e302572fdf244b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 20 Oct 2023 13:41:53 -0400 Subject: [PATCH 253/417] continue loop if minion not in mine --- salt/salt/engines/master/checkmine.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/salt/engines/master/checkmine.py b/salt/salt/engines/master/checkmine.py index afeb92536..f33392575 100644 --- a/salt/salt/engines/master/checkmine.py +++ b/salt/salt/engines/master/checkmine.py @@ -71,6 +71,7 @@ def start(interval=60): log.error('checkmine engine: found minion %s is not in the mine' % (minion)) mine_flush(minion) mine_update(minion) + continue # Update the mine if the ip in the mine doesn't match returned from manage.alived network_ip_addrs = __salt__['saltutil.runner']('mine.get', tgt=minion, fun='network.ip_addrs') @@ -85,4 +86,5 @@ def start(interval=60): log.error('checkmine engine: found minion %s is not in the mine' % (minion)) mine_flush(minion) mine_update(minion) + sleep(interval) From c588bf4395aa3d6734c2787695a9b786ceb69d30 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: 
Fri, 20 Oct 2023 13:43:12 -0400 Subject: [PATCH 254/417] update mine and highstate minion when added --- salt/manager/tools/sbin/so-minion | 35 ++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 64084dbd0..325bedf4a 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -407,11 +407,9 @@ function update_logstash_outputs() { } function updateMine() { - salt "$MINION_ID" mine.send network.ip_addrs interface="$MNIC" -} -function apply_ES_state() { - salt-call state.apply elasticsearch concurrent=True + retry 10 2 "salt '$MINION_ID' mine.update" True } + function createEVAL() { is_pcaplimit=true add_elasticsearch_to_minion @@ -547,8 +545,6 @@ function createSEARCHNODE() { add_elasticsearch_to_minion add_logstash_to_minion add_telegraf_to_minion - updateMine - apply_ES_state } function createRECEIVER() { @@ -563,6 +559,19 @@ function createDESKTOP() { } function testConnection() { + # the minion should be trying to auth every 10 seconds so 15 seconds should be more than enough time to see this in the log + # this retry was put in because it is possible that a minion is attempted to be pinged before it has authenticated and connected to the Salt master + # causing the first ping to fail and typically wouldn't be successful until the second ping + # this check may pass without the minion being authenticated if it was previously connected and the line exists in the log + retry 15 1 "grep 'Authentication accepted from $MINION_ID' /opt/so/log/salt/master" + local retauth=$? + if [[ $retauth != 0 ]]; then + echo "The Minion did not authenticate with the Salt master in the allotted time" + echo "Deleting the key" + deleteminion + exit 1 + fi + retry 15 3 "salt '$MINION_ID' test.ping" True local ret=$? 
if [[ $ret != 0 ]]; then @@ -582,9 +591,9 @@ if [[ "$OPERATION" = 'delete' ]]; then deleteminion fi -if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then +if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then # Skip this if its setup - if [ $OPERATION != 'setup' ]; then + if [[ $OPERATION == 'add' ]]; then # Accept the salt key acceptminion # Test to see if the minion was accepted @@ -605,8 +614,18 @@ if [[ "$OPERATION" = 'add' || "$OPERATION" = 'setup' ]]; then else add_sensoroni_to_minion fi + create$NODETYPE echo "Minion file created for $MINION_ID" + + if [[ "$OPERATION" == 'add' ]]; then + # tell the minion to populate the mine with data from mine_functions which is populated during setup + # this only needs to happen on non managers since they handle this during setup + # and they need to wait for ca creation to update the mine + updateMine + # run this async so the cli doesn't wait for a return + salt "$MINION_ID" state.highstate --async + fi fi if [[ "$OPERATION" = 'test' ]]; then From c4093394464bded7ad6ce2b76720a97e823179d3 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 20 Oct 2023 13:46:24 -0400 Subject: [PATCH 255/417] change post setup highstate cron to 5 minutes since accepting minion runs a highstate --- salt/setup/highstate_cron.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/setup/highstate_cron.sls b/salt/setup/highstate_cron.sls index 862968d97..f8f76e737 100644 --- a/salt/setup/highstate_cron.sls +++ b/salt/setup/highstate_cron.sls @@ -3,5 +3,5 @@ post_setup_cron: - name: 'PATH=$PATH:/usr/sbin salt-call state.highstate' - identifier: post_setup_cron - user: root - - minute: '*/1' + - minute: '*/5' - identifier: post_setup_cron From 7e3aa11a7341c6324b13dbf8367fe21ae80c8870 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 20 Oct 2023 16:27:20 -0400 Subject: [PATCH 256/417] check mine is populated with ip before telling node to highstate --- salt/manager/tools/sbin/so-minion | 9 ++++++++- 1 
file changed, 8 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index 325bedf4a..f92c0ff55 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -406,8 +406,14 @@ function update_logstash_outputs() { curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" } +function checkMine() { + func = $1 + retry 20 1 "salt-run '$MINION_ID' mine.get '$func'" "$MINION_ID" + +} + function updateMine() { - retry 10 2 "salt '$MINION_ID' mine.update" True + retry 20 1 "salt '$MINION_ID' mine.update" True } function createEVAL() { @@ -623,6 +629,7 @@ if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then # this only needs to happen on non managers since they handle this during setup # and they need to wait for ca creation to update the mine updateMine + checkMine "network.ip_addrs" # run this async so the cli doesn't wait for a return salt "$MINION_ID" state.highstate --async fi From dc3ca99c12e56c756cbee4998b962dd4410c8004 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 20 Oct 2023 17:16:33 -0400 Subject: [PATCH 257/417] ask the minion if it can see itself in the mine --- salt/manager/tools/sbin/so-minion | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index f92c0ff55..eca96da5c 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -407,8 +407,9 @@ function update_logstash_outputs() { } function checkMine() { - func = $1 - retry 20 1 "salt-run '$MINION_ID' mine.get '$func'" "$MINION_ID" + local func=$1 + # make sure the minion sees itself in the mine since it needs to see itself for states as opposed to using salt-run + retry 20 1 "salt '$MINION_ID' mine.get '\*' '$func'" "$MINION_ID" } From 
030a667d260ae59e849b8c59814c9f371580e458 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Mon, 23 Oct 2023 11:47:14 -0400 Subject: [PATCH 258/417] Add -watch to soctopus saltstate for file SOCtopus.conf. Makes container restart @ highstate if file is updated. Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/soctopus/enabled.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/soctopus/enabled.sls b/salt/soctopus/enabled.sls index 0474998cb..567562fbb 100644 --- a/salt/soctopus/enabled.sls +++ b/salt/soctopus/enabled.sls @@ -52,6 +52,8 @@ so-soctopus: - {{ XTRAENV }} {% endfor %} {% endif %} + - watch: + - file: /opt/so/conf/soctopus/SOCtopus.conf - require: - file: soctopusconf - file: navigatordefaultlayer From 660020cc760f2a80b6175922718e35be134d5084 Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 23 Oct 2023 15:45:41 -0400 Subject: [PATCH 259/417] Parse pkt_src for Suricata logs --- salt/elasticsearch/files/ingest/suricata.common | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/files/ingest/suricata.common b/salt/elasticsearch/files/ingest/suricata.common index e12fea0be..6b6a03a60 100644 --- a/salt/elasticsearch/files/ingest/suricata.common +++ b/salt/elasticsearch/files/ingest/suricata.common @@ -2,6 +2,7 @@ "description" : "suricata.common", "processors" : [ { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, + { "rename": { "field": "message2.pkt_src", "target_field": "network.packet_source","ignore_failure": true } }, { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_failure": true } }, { "rename": { "field": "message2.flow_id", "target_field": "log.id.uid", "ignore_failure": true } }, { "rename": { "field": "message2.src_ip", "target_field": "source.ip", "ignore_failure": true } }, From 4a3fc06a4d8119f1b241e3e8ce61f3794559c71f Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 24 Oct 
2023 09:18:10 -0400 Subject: [PATCH 260/417] Enable http2 for Suricata --- salt/suricata/defaults.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/suricata/defaults.yaml b/salt/suricata/defaults.yaml index 050efa8f8..e9e39d40a 100644 --- a/salt/suricata/defaults.yaml +++ b/salt/suricata/defaults.yaml @@ -280,7 +280,7 @@ suricata: mqtt: enabled: 'no' http2: - enabled: 'no' + enabled: 'yes' asn1-max-frames: 256 run-as: user: suricata From 6d3465626eaa75f2fb341b7f03b4c4c1aa280dde Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 24 Oct 2023 12:52:25 -0400 Subject: [PATCH 261/417] if deb fam, stop salt-master and salt-minion after salt upgrade --- salt/common/tools/sbin/so-common | 2 +- salt/manager/tools/sbin/soup | 10 ++++++++++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index a2ac96ece..87f40c9d4 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -453,7 +453,7 @@ wait_for_salt_minion() { local maxAttempts=20 until check_salt_minion_status "$minion" "$timeout" "$logfile"; do attempt=$((attempt+1)) - if [[ $attempt -gt $maxAttempts ]]; then + if [[ $attempt -eq $maxAttempts ]]; then return 1 fi sleep 10 diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 48c3e543d..e3c3b15a7 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -866,6 +866,16 @@ main() { echo "Upgrading Salt" # Update the repo files so it can actually upgrade upgrade_salt + + # for Debian based distro, we need to stop salt again after upgrade output below is from bootstrap-salt + # * WARN: Not starting daemons on Debian based distributions + # is not working mostly because starting them is the default behaviour. 
+ if [[ $is_deb ]]; then + systemctl status salt-master + systemctl status salt-minion + stop_salt_master + stop_salt_minion + fi fi preupgrade_changes From 180ba3a9583da95f4ba69ff5eec6991c68554ee8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 24 Oct 2023 13:24:52 -0400 Subject: [PATCH 262/417] if deb fam, stop salt-master and salt-minion after salt upgrade --- salt/manager/tools/sbin/soup | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index e3c3b15a7..58011c379 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -871,10 +871,8 @@ main() { # * WARN: Not starting daemons on Debian based distributions # is not working mostly because starting them is the default behaviour. if [[ $is_deb ]]; then - systemctl status salt-master - systemctl status salt-minion - stop_salt_master stop_salt_minion + stop_salt_master fi fi From 310a6b4f27a02e8a748966ac345cccc2498da3d9 Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Tue, 24 Oct 2023 14:21:01 -0400 Subject: [PATCH 263/417] Add kibana curl config --- salt/common/init.sls | 1 + salt/kibana/files/curl.config.template | 1 + salt/manager/init.sls | 1 + salt/manager/kibana.sls | 7 +++++++ 4 files changed, 10 insertions(+) create mode 100644 salt/kibana/files/curl.config.template create mode 100644 salt/manager/kibana.sls diff --git a/salt/common/init.sls b/salt/common/init.sls index f50f0c61b..8c0089fc0 100644 --- a/salt/common/init.sls +++ b/salt/common/init.sls @@ -8,6 +8,7 @@ include: - common.packages {% if GLOBALS.role in GLOBALS.manager_roles %} - manager.elasticsearch # needed for elastic_curl_config state + - manager.kibana {% endif %} net.core.wmem_default: diff --git a/salt/kibana/files/curl.config.template b/salt/kibana/files/curl.config.template new file mode 100644 index 000000000..7ac434017 --- /dev/null +++ b/salt/kibana/files/curl.config.template @@ -0,0 +1 @@ +user = "{{ 
salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:user', 'NO_USER_SET') }}:{{ salt['pillar.get']('elasticsearch:auth:users:so_kibana_user:pass', 'NO_PW_SET') }}" diff --git a/salt/manager/init.sls b/salt/manager/init.sls index e808325ef..cbe3455fe 100644 --- a/salt/manager/init.sls +++ b/salt/manager/init.sls @@ -16,6 +16,7 @@ include: - kibana.secrets - manager.sync_es_users - manager.elasticsearch + - manager.kibana repo_log_dir: file.directory: diff --git a/salt/manager/kibana.sls b/salt/manager/kibana.sls new file mode 100644 index 000000000..eb2b968d6 --- /dev/null +++ b/salt/manager/kibana.sls @@ -0,0 +1,7 @@ +elastic_curl_config_distributed: + file.managed: + - name: /opt/so/conf/kibana/curl.config + - source: salt://kibana/files/curl.config.template + - template: jinja + - mode: 600 + - show_changes: False From 1d6e32fbab8724f013044a50c427db5489253c8e Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 24 Oct 2023 15:08:50 -0400 Subject: [PATCH 264/417] dont exit if salt isnt running --- salt/manager/tools/sbin/soup | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 58011c379..fcf8dd187 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -460,7 +460,6 @@ stop_salt_master() { echo "" echo "Killing any queued Salt jobs on the manager." pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1 - set -e echo "" echo "Storing salt-master pid." @@ -468,6 +467,7 @@ stop_salt_master() { echo "Found salt-master PID $MASTERPID" systemctl_func "stop" "salt-master" timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option." + set -e } stop_salt_minion() { @@ -480,14 +480,12 @@ stop_salt_minion() { echo "" echo "Killing Salt jobs on this node." salt-call saltutil.kill_all_jobs --local - set -e echo "Storing salt-minion pid." 
MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1) echo "Found salt-minion PID $MINIONPID" systemctl_func "stop" "salt-minion" - set +e timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion set -e } From 3e343bff84ec0cc60682e08135dee528e990e5f8 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 24 Oct 2023 16:40:51 -0400 Subject: [PATCH 265/417] fix line to log properly --- salt/manager/tools/sbin/soup | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fcf8dd187..dee39ac59 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -827,7 +827,7 @@ main() { echo "Hotfix applied" update_version enable_highstate - wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG" || fail "Salt minion was not running or ready." + (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" highstate else echo "" @@ -934,7 +934,7 @@ main() { echo "" echo "Running a highstate. This could take several minutes." set +e - wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG" || fail "Salt minion was not running or ready." + (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" highstate set -e @@ -950,7 +950,7 @@ main() { set -e echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." - wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' 2>&1 | tee -a "$SOUP_LOG" || fail "Salt minion was not running or ready." 
+ (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" highstate postupgrade_changes [[ $is_airgap -eq 0 ]] && unmount_update From dfe707ab644aabdf65c59497ac6077f9cab7c1d5 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Tue, 24 Oct 2023 17:26:39 -0400 Subject: [PATCH 266/417] fix issue/11610 --- salt/pcap/config.sls | 2 +- salt/suricata/config.sls | 2 +- salt/zeek/config.sls | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/pcap/config.sls b/salt/pcap/config.sls index 26236e2ff..9ea5cee65 100644 --- a/salt/pcap/config.sls +++ b/salt/pcap/config.sls @@ -41,7 +41,7 @@ pcap_sbin: - file_mode: 755 {% if PCAPBPF %} - {% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %} + {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + PCAPBPF|join(" "),cwd='/root') %} {% if BPF_CALC['stderr'] == "" %} {% set BPF_COMPILED = ",\\\"--filter=" + BPF_CALC['stdout'] + "\\\"" %} {% else %} diff --git a/salt/suricata/config.sls b/salt/suricata/config.sls index 9da40660e..8d5279349 100644 --- a/salt/suricata/config.sls +++ b/salt/suricata/config.sls @@ -129,7 +129,7 @@ surithresholding: # BPF compilation and configuration {% if SURICATABPF %} - {% set BPF_CALC = salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %} + {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + SURICATABPF|join(" "),cwd='/root') %} {% if BPF_CALC['stderr'] == "" %} {% set BPF_STATUS = 1 %} {% else %} diff --git a/salt/zeek/config.sls b/salt/zeek/config.sls index 703da8d85..7fdbd8560 100644 --- a/salt/zeek/config.sls +++ b/salt/zeek/config.sls @@ -152,7 +152,7 @@ plcronscript: # BPF compilation and configuration {% if ZEEKBPF %} - {% set BPF_CALC = 
salt['cmd.script']('/usr/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %} + {% set BPF_CALC = salt['cmd.script']('salt://common/tools/sbin/so-bpf-compile', GLOBALS.sensor.interface + ' ' + ZEEKBPF|join(" "),cwd='/root') %} {% if BPF_CALC['stderr'] == "" %} {% set BPF_STATUS = 1 %} {% else %} From 3ad480453a192c9fada8bf3aae3d6dd254029093 Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Wed, 25 Oct 2023 07:20:07 -0400 Subject: [PATCH 267/417] Rename to remove dupe --- salt/manager/kibana.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/manager/kibana.sls b/salt/manager/kibana.sls index eb2b968d6..f9aad3f05 100644 --- a/salt/manager/kibana.sls +++ b/salt/manager/kibana.sls @@ -1,4 +1,4 @@ -elastic_curl_config_distributed: +kibana_curl_config_distributed: file.managed: - name: /opt/so/conf/kibana/curl.config - source: salt://kibana/files/curl.config.template From a66006c8a639491ecd13aca6f0357c7cd69d5978 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 25 Oct 2023 09:04:23 -0400 Subject: [PATCH 268/417] minor updates --- salt/kratos/map.jinja | 2 +- salt/kratos/soc_kratos.yaml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/salt/kratos/map.jinja b/salt/kratos/map.jinja index 6d1e2917c..a2477098d 100644 --- a/salt/kratos/map.jinja +++ b/salt/kratos/map.jinja @@ -21,6 +21,6 @@ {% set KRATOSMERGED = salt['pillar.get']('kratos', default=KRATOSDEFAULTS.kratos, merge=true) %} -{% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('licensed_features') %} +{% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('features') %} {% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %} {% endif %} \ No newline at end of file diff --git a/salt/kratos/soc_kratos.yaml b/salt/kratos/soc_kratos.yaml index 0ac2fcd44..6285bf1ad 100644 --- a/salt/kratos/soc_kratos.yaml +++ 
b/salt/kratos/soc_kratos.yaml @@ -103,10 +103,10 @@ kratos: methods: password: enabled: - description: Set to True to enable traditional password authentication to SOC. Typically set to true, except when exclusively using OIDC authentication. + description: Set to True to enable traditional password authentication to SOC. Typically set to true, except when exclusively using OIDC authentication. Some external tool interfaces may not be accessible if local password authentication is disabled. global: True advanced: True - helpLink: kratos.html + helpLink: oidc.html config: haveibeenpwned_enabled: description: Set to True to check if a newly chosen password has ever been found in a published list of previously-compromised passwords. Requires outbound Internet connectivity when enabled. From a3e6b1ee1d495f8dae23175bf762378873d91507 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 25 Oct 2023 09:26:36 -0400 Subject: [PATCH 269/417] change generate_ssl wait_for_salt_minion --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index b64daaa92..fd5bc790f 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1258,7 +1258,7 @@ generate_ssl() { # if the install type is a manager then we need to wait for the minion to be ready before trying # to run the ssl state since we need the minion to sign the certs if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then - wait_for_salt_minion "$MINION_ID" "5" "$setup_log" || fail_setup + (wait_for_salt_minion "$MINION_ID" "5" '/dev/stdout' || fail_setup) 2>&1 | tee -a "$setup_log" fi info "Applying SSL state" logCmd "salt-call state.apply ssl -l info" From c41e19ad0b301f1deae35365e62544e987045fdd Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Wed, 25 Oct 2023 11:01:13 -0400 Subject: [PATCH 270/417] Revert "Upgrade/salt3006.3" --- salt/common/tools/sbin/so-common | 75 +- salt/manager/tools/sbin/soup | 62 +- 
salt/salt/map.jinja | 2 +- salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- salt/salt/scripts/bootstrap-salt.sh | 2102 +++------------------------ setup/so-functions | 5 +- 7 files changed, 251 insertions(+), 1999 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 87f40c9d4..fc14e9d0a 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -152,18 +152,15 @@ check_salt_master_status() { return 0 } -# this is only intended to be used to check the status of the minion check_salt_minion_status() { - local minion="$1" - local timeout="${2:-5}" - local logfile="${3:-'/dev/stdout'}" - echo "Checking if the salt minion will respond to jobs" >> "$logfile" 2>&1 - salt "$minion" test.ping -t $timeout > /dev/null 2>&1 + local timeout="${1:-5}" + echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 + salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 local status=$? 
if [ $status -gt 0 ]; then - echo " Minion did not respond" >> "$logfile" 2>&1 + echo " Minion did not respond" >> "$setup_log" 2>&1 else - echo " Received job response from salt minion" >> "$logfile" 2>&1 + echo " Received job response from salt minion" >> "$setup_log" 2>&1 fi return $status @@ -443,24 +440,6 @@ run_check_net_err() { fi } -wait_for_salt_minion() { - local minion="$1" - local timeout="${2:-5}" - local logfile="${3:-'/dev/stdout'}" - retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail - local attempt=0 - # each attempts would take about 15 seconds - local maxAttempts=20 - until check_salt_minion_status "$minion" "$timeout" "$logfile"; do - attempt=$((attempt+1)) - if [[ $attempt -eq $maxAttempts ]]; then - return 1 - fi - sleep 10 - done - return 0 -} - salt_minion_count() { local MINIONDIR="/opt/so/saltstack/local/pillar/minions" MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l) @@ -473,51 +452,19 @@ set_os() { OS=rocky OSVER=9 is_rocky=true - is_rpm=true elif grep -q "CentOS Stream release 9" /etc/redhat-release; then OS=centos OSVER=9 is_centos=true - is_rpm=true - elif grep -q "AlmaLinux release 9" /etc/redhat-release; then - OS=alma + elif grep -q "Oracle Linux Server release 9" /etc/system-release; then + OS=oel OSVER=9 - is_alma=true - is_rpm=true - elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then - if [ -f /etc/oracle-release ]; then - OS=oracle - OSVER=9 - is_oracle=true - is_rpm=true - else - OS=rhel - OSVER=9 - is_rhel=true - is_rpm=true - fi + is_oracle=true fi cron_service_name="crond" - elif [ -f /etc/os-release ]; then - if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then - OSVER=focal - UBVER=20.04 - OS=ubuntu - is_ubuntu=true - is_deb=true - elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then - OSVER=jammy - UBVER=22.04 - OS=ubuntu - is_ubuntu=true - is_deb=true - elif grep -q "VERSION_CODENAME=bookworm" 
/etc/os-release; then - OSVER=bookworm - DEBVER=12 - is_debian=true - OS=debian - is_deb=true - fi + else + OS=ubuntu + is_ubuntu=true cron_service_name="cron" fi } diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index dee39ac59..fc07765b8 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -460,6 +460,7 @@ stop_salt_master() { echo "" echo "Killing any queued Salt jobs on the manager." pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1 + set -e echo "" echo "Storing salt-master pid." @@ -467,7 +468,6 @@ stop_salt_master() { echo "Found salt-master PID $MASTERPID" systemctl_func "stop" "salt-master" timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option." - set -e } stop_salt_minion() { @@ -480,12 +480,14 @@ stop_salt_minion() { echo "" echo "Killing Salt jobs on this node." salt-call saltutil.kill_all_jobs --local + set -e echo "Storing salt-minion pid." MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1) echo "Found salt-minion PID $MINIONPID" systemctl_func "stop" "salt-minion" + set +e timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion set -e } @@ -618,7 +620,6 @@ upgrade_check_salt() { if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then echo "You are already running the correct version of Salt for Security Onion." else - echo "Salt needs to be upgraded to $NEWSALTVERSION." UPGRADESALT=1 fi } @@ -627,48 +628,22 @@ upgrade_salt() { SALTUPGRADED=True echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "" - # If rhel family - if [[ $is_rpm ]]; then + # If CentOS + if [[ $OS == 'centos' ]]; then echo "Removing yum versionlock for Salt." echo "" yum versionlock delete "salt-*" echo "Updating Salt packages." 
echo "" set +e - # if oracle run with -r to ignore repos set by bootstrap - if [[ $OS == 'oracle' ]]; then - run_check_net_err \ - "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ - "Could not update salt, please check $SOUP_LOG for details." - # if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos - else - run_check_net_err \ - "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \ - "Could not update salt, please check $SOUP_LOG for details." - fi + run_check_net_err \ + "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ + "Could not update salt, please check $SOUP_LOG for details." set -e echo "Applying yum versionlock for Salt." echo "" yum versionlock add "salt-*" # Else do Ubuntu things - elif [[ $is_deb ]]; then - echo "Removing apt hold for Salt." - echo "" - apt-mark unhold "salt-common" - apt-mark unhold "salt-master" - apt-mark unhold "salt-minion" - echo "Updating Salt packages." - echo "" - set +e - run_check_net_err \ - "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \ - "Could not update salt, please check $SOUP_LOG for details." - set -e - echo "Applying apt hold for Salt." - echo "" - apt-mark hold "salt-common" - apt-mark hold "salt-master" - apt-mark hold "salt-minion" fi echo "Checking if Salt was upgraded." @@ -680,7 +655,7 @@ upgrade_salt() { echo "Once the issue is resolved, run soup again." echo "Exiting." echo "" - exit 1 + exit 0 else echo "Salt upgrade success." 
echo "" @@ -810,7 +785,7 @@ main() { if [[ $is_airgap -eq 0 ]]; then yum clean all check_os_updates - elif [[ $OS == 'oracle' ]]; then + elif [[ $OS == 'oel' ]]; then # sync remote repo down to local if not airgap repo_sync check_os_updates @@ -827,8 +802,7 @@ main() { echo "Hotfix applied" update_version enable_highstate - (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" - highstate + salt-call state.highstate -l info queue=True else echo "" echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION." @@ -864,14 +838,6 @@ main() { echo "Upgrading Salt" # Update the repo files so it can actually upgrade upgrade_salt - - # for Debian based distro, we need to stop salt again after upgrade output below is from bootstrap-salt - # * WARN: Not starting daemons on Debian based distributions - # is not working mostly because starting them is the default behaviour. - if [[ $is_deb ]]; then - stop_salt_minion - stop_salt_master - fi fi preupgrade_changes @@ -934,8 +900,7 @@ main() { echo "" echo "Running a highstate. This could take several minutes." set +e - (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" - highstate + salt-call state.highstate -l info queue=True set -e stop_salt_master @@ -950,8 +915,7 @@ main() { set -e echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." 
- (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" - highstate + salt-call state.highstate -l info queue=True postupgrade_changes [[ $is_airgap -eq 0 ]] && unmount_update diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 131ff46ca..1120685fb 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -23,7 +23,7 @@ {% if grains.os|lower in ['Rocky', 'redhat', 'CentOS Stream'] %} {% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} {% elif grains.os_family|lower == 'debian' %} - {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} {% endif %} {% else %} {% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %} diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 40b6f5268..126039802 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -2,4 +2,4 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: master: - version: 3006.3 + version: 3006.1 diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index 71fd18f96..7e1540d17 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -2,6 +2,6 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: minion: - version: 3006.3 + version: 3006.1 check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. 
any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default service_start_delay: 30 # in seconds. diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index a016524e6..47d25949c 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -14,7 +14,7 @@ # # BUGS: https://github.com/saltstack/salt-bootstrap/issues # -# COPYRIGHT: (c) 2012-2022 by the SaltStack Team, see AUTHORS.rst for more +# COPYRIGHT: (c) 2012-2021 by the SaltStack Team, see AUTHORS.rst for more # details. # # LICENSE: Apache 2.0 @@ -23,7 +23,7 @@ #====================================================================================================================== set -o nounset # Treat unset variables as an error -__ScriptVersion="2023.08.03" +__ScriptVersion="2021.09.17" __ScriptName="bootstrap-salt.sh" __ScriptFullName="$0" @@ -224,6 +224,7 @@ _KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE} _TEMP_CONFIG_DIR="null" _SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git" _SALT_REPO_URL=${_SALTSTACK_REPO_URL} +_DOWNSTREAM_PKG_REPO=$BS_FALSE _TEMP_KEYS_DIR="null" _SLEEP="${__DEFAULT_SLEEP}" _INSTALL_MASTER=$BS_FALSE @@ -267,8 +268,6 @@ _CUSTOM_MASTER_CONFIG="null" _CUSTOM_MINION_CONFIG="null" _QUIET_GIT_INSTALLATION=$BS_FALSE _REPO_URL="repo.saltproject.io" -_ONEDIR_DIR="salt" -_ONEDIR_NIGHTLY_DIR="salt-dev/${_ONEDIR_DIR}" _PY_EXE="python3" _INSTALL_PY="$BS_FALSE" _TORNADO_MAX_PY3_VERSION="5.0" @@ -276,9 +275,6 @@ _POST_NEON_INSTALL=$BS_FALSE _MINIMUM_PIP_VERSION="9.0.1" _MINIMUM_SETUPTOOLS_VERSION="9.1" _POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr" -_PIP_DOWNLOAD_ARGS="" -_QUICK_START="$BS_FALSE" -_AUTO_ACCEPT_MINION_KEYS="$BS_FALSE" # Defaults for install arguments ITYPE="stable" @@ -294,130 +290,110 @@ __usage() { Usage : ${__ScriptName} [options] [install-type-args] Installation types: - - stable Install latest stable release. 
This is the default - install type - - stable [branch] Install latest version on a branch. Only supported - for packages available at repo.saltproject.io - - stable [version] Install a specific version. Only supported for - packages available at repo.saltproject.io - To pin a 3xxx minor version, specify it as 3xxx.0 - - testing RHEL-family specific: configure EPEL testing repo - - git Install from the head of the master branch - - git [ref] Install from any git ref (such as a branch, tag, or - commit) - - onedir Install latest onedir release. - - onedir [version] Install a specific version. Only supported for - onedir packages available at repo.saltproject.io - - - onedir_rc Install latest onedir RC release. - - onedir_rc [version] Install a specific version. Only supported for - onedir RC packages available at repo.saltproject.io - - old-stable Install latest old stable release. - - old-stable [branch] Install latest version on a branch. Only supported - for packages available at repo.saltproject.io - - old-stable [version] Install a specific version. Only supported for - packages available at repo.saltproject.io - To pin a 3xxx minor version, specify it as 3xxx.0 + - stable Install latest stable release. This is the default + install type + - stable [branch] Install latest version on a branch. Only supported + for packages available at repo.saltproject.io + - stable [version] Install a specific version. 
Only supported for + packages available at repo.saltproject.io + To pin a 3xxx minor version, specify it as 3xxx.0 + - testing RHEL-family specific: configure EPEL testing repo + - git Install from the head of the master branch + - git [ref] Install from any git ref (such as a branch, tag, or + commit) Examples: - ${__ScriptName} - ${__ScriptName} stable - - ${__ScriptName} stable 3006 - - ${__ScriptName} stable 3006.1 + - ${__ScriptName} stable 2017.7 + - ${__ScriptName} stable 2017.7.2 - ${__ScriptName} testing - ${__ScriptName} git - ${__ScriptName} git 2017.7 - ${__ScriptName} git v2017.7.2 - ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358 - - ${__ScriptName} onedir - - ${__ScriptName} onedir 3006 - - ${__ScriptName} onedir_rc - - ${__ScriptName} onedir_rc 3006 - - ${__ScriptName} old-stable - - ${__ScriptName} old-stable 3005 - - ${__ScriptName} old-stable 3005.1 - Options: - -a Pip install all Python pkg dependencies for Salt. Requires -V to install - all pip pkgs into the virtualenv. - (Only available for Ubuntu based distributions) - -A Pass the salt-master DNS name or IP. This will be stored under - \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf - -b Assume that dependencies are already installed and software sources are - set up. If git is selected, git tree is still checked out as dependency - step. + -h Display this message + -v Display script version + -n No colours + -D Show debug output -c Temporary configuration directory - -C Only run the configuration function. Implies -F (forced overwrite). - To overwrite Master or Syndic configs, -M or -S, respectively, must - also be specified. Salt installation will be ommitted, but some of the - dependencies could be installed to write configuration with -j or -J. + -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} + -w Install packages from downstream package repository rather than + upstream, saltstack package repository. This is currently only + implemented for SUSE. 
+ -k Temporary directory holding the minion keys which will pre-seed + the master. + -s Sleep time used when waiting for daemons to start, restart and when + checking for the services running. Default: ${__DEFAULT_SLEEP} + -L Also install salt-cloud and required python-libcloud package + -M Also install salt-master + -S Also install salt-syndic + -N Do not install salt-minion + -X Do not start daemons after installation -d Disables checking if Salt services are enabled to start on system boot. You can also do this by touching /tmp/disable_salt_checks on the target host. Default: \${BS_FALSE} - -D Show debug output - -f Force shallow cloning for git installations. - This may result in an "n/a" in the version number. - -F Allow copied files to overwrite existing (config, init.d, etc) - -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} - -h Display this message - -H Use the specified HTTP proxy for all download URLs (including https://). - For example: http://myproxy.example.com:3128 - -i Pass the salt-minion id. This will be stored under - \${BS_SALT_ETC_DIR}/minion_id - -I If set, allow insecure connections while downloading any files. For - example, pass '--no-check-certificate' to 'wget' or '--insecure' to - 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining - GnuPG archive keys insecurely if distro has changed release signatures. - -j Replace the Minion config file with data passed in as a JSON string. If - a Minion config file is found, a reasonable effort will be made to save - the file with a ".bak" extension. If used in conjunction with -C or -F, - no ".bak" file will be created as either of those options will force - a complete overwrite of the file. - -J Replace the Master config file with data passed in as a JSON string. If - a Master config file is found, a reasonable effort will be made to save - the file with a ".bak" extension. 
If used in conjunction with -C or -F, - no ".bak" file will be created as either of those options will force - a complete overwrite of the file. - -k Temporary directory holding the minion keys which will pre-seed - the master. - -K If set, keep the temporary files in the temporary directories specified - with -c and -k - -l Disable ssl checks. When passed, switches "https" calls to "http" where - possible. - -L Also install salt-cloud and required python-libcloud package - -M Also install salt-master - -n No colours - -N Do not install salt-minion - -p Extra-package to install while installing Salt dependencies. One package - per -p flag. You are responsible for providing the proper package name. -P Allow pip based installations. On some distributions the required salt packages or its dependencies are not available as a package for that distribution. Using this flag allows the script to use pip as a last resort method. NOTE: This only works for functions which actually implement pip based installations. - -q Quiet salt installation from git (setup.py install -q) - -Q Quickstart, install the Salt master and the Salt minion. - And automatically accept the minion key. + -U If set, fully upgrade the system prior to bootstrapping Salt + -I If set, allow insecure connections while downloading any files. For + example, pass '--no-check-certificate' to 'wget' or '--insecure' to + 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining + GnuPG archive keys insecurely if distro has changed release signatures. + -F Allow copied files to overwrite existing (config, init.d, etc) + -K If set, keep the temporary files in the temporary directories specified + with -c and -k + -C Only run the configuration function. Implies -F (forced overwrite). + To overwrite Master or Syndic configs, -M or -S, respectively, must + also be specified. Salt installation will be ommitted, but some of the + dependencies could be installed to write configuration with -j or -J. 
+ -A Pass the salt-master DNS name or IP. This will be stored under + \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf + -i Pass the salt-minion id. This will be stored under + \${BS_SALT_ETC_DIR}/minion_id + -p Extra-package to install while installing Salt dependencies. One package + per -p flag. You are responsible for providing the proper package name. + -H Use the specified HTTP proxy for all download URLs (including https://). + For example: http://myproxy.example.com:3128 + -b Assume that dependencies are already installed and software sources are + set up. If git is selected, git tree is still checked out as dependency + step. + -f Force shallow cloning for git installations. + This may result in an "n/a" in the version number. + -l Disable ssl checks. When passed, switches "https" calls to "http" where + possible. + -V Install Salt into virtualenv + (only available for Ubuntu based distributions) + -a Pip install all Python pkg dependencies for Salt. Requires -V to install + all pip pkgs into the virtualenv. + (Only available for Ubuntu based distributions) + -r Disable all repository configuration performed by this script. This + option assumes all necessary repository configuration is already present + on the system. -R Specify a custom repository URL. Assumes the custom repository URL points to a repository that mirrors Salt packages located at repo.saltproject.io. The option passed with -R replaces the "repo.saltproject.io". If -R is passed, -r is also set. Currently only - works on CentOS/RHEL and Debian based distributions and macOS. - -s Sleep time used when waiting for daemons to start, restart and when - checking for the services running. Default: ${__DEFAULT_SLEEP} - -S Also install salt-syndic - -r Disable all repository configuration performed by this script. This - option assumes all necessary repository configuration is already present - on the system. 
- -U If set, fully upgrade the system prior to bootstrapping Salt - -v Display script version - -V Install Salt into virtualenv - (only available for Ubuntu based distributions) + works on CentOS/RHEL and Debian based distributions. + -J Replace the Master config file with data passed in as a JSON string. If + a Master config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -j Replace the Minion config file with data passed in as a JSON string. If + a Minion config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -q Quiet salt installation from git (setup.py install -q) -x Changes the Python version used to install Salt. For CentOS 6 git installations python2.7 is supported. - Fedora git installation, CentOS 7, Ubuntu 18.04 support python3. - -X Do not start daemons after installation + Fedora git installation, CentOS 7, Debian 9, Ubuntu 16.04 and 18.04 support python3. -y Installs a different python version on host. Currently this has only been tested with CentOS 6 and is considered experimental. This will install the ius repo on the box if disable repo is false. This must be used in conjunction @@ -430,7 +406,7 @@ EOT } # ---------- end of function __usage ---------- -while getopts ':hvnDc:g:Gyx:k:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aqQ' opt +while getopts ':hvnDc:g:Gyx:wk:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aq' opt do case "${opt}" in @@ -446,6 +422,7 @@ do echowarn "No need to provide this option anymore, now it is a default behavior." 
;; + w ) _DOWNSTREAM_PKG_REPO=$BS_TRUE ;; k ) _TEMP_KEYS_DIR="$OPTARG" ;; s ) _SLEEP=$OPTARG ;; M ) _INSTALL_MASTER=$BS_TRUE ;; @@ -474,7 +451,6 @@ do J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;; j ) _CUSTOM_MINION_CONFIG=$OPTARG ;; q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;; - Q ) _QUICK_START=$BS_TRUE ;; x ) _PY_EXE="$OPTARG" ;; y ) _INSTALL_PY="$BS_TRUE" ;; @@ -596,7 +572,7 @@ fi echoinfo "Running version: ${__ScriptVersion}" echoinfo "Executed by: ${CALLER}" echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" -echowarn "Running the unstable version of ${__ScriptName}" +#echowarn "Running the unstable version of ${__ScriptName}" # Define installation type if [ "$#" -gt 0 ];then @@ -606,17 +582,11 @@ if [ "$#" -gt 0 ];then fi # Check installation type -if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git|onedir|onedir_rc|old-stable)')" = "" ]; then +if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git)')" = "" ]; then echoerror "Installation type \"$ITYPE\" is not known..." exit 1 fi -# Due to our modifications to install_centos_onedir it is easiest to just lock down to only allowing stable install -if [ "$(echo "$ITYPE" | grep stable)" = "" ]; then - echoerror "This script has been modified to only support stable installation type. Installation type \"$ITYPE\" is not allowed..." 
- exit 1 -fi - # If doing a git install, check what branch/tag/sha will be checked out if [ "$ITYPE" = "git" ]; then if [ "$#" -eq 0 ];then @@ -632,123 +602,23 @@ if [ "$ITYPE" = "git" ]; then # If doing stable install, check if version specified elif [ "$ITYPE" = "stable" ]; then if [ "$#" -eq 0 ];then - ONEDIR_REV="latest" - _ONEDIR_REV="latest" - ITYPE="onedir" + STABLE_REV="latest" else - if [ "$(echo "$1" | grep -E '^(nightly|latest|3005|3006)$')" != "" ]; then - ONEDIR_REV="$1" - _ONEDIR_REV="$1" - ITYPE="onedir" - shift - elif [ "$(echo "$1" | grep -E '^([3-9][0-5]{2}[5-9](\.[0-9]*)?)')" != "" ]; then - ONEDIR_REV="minor/$1" - _ONEDIR_REV="$1" - ITYPE="onedir" - shift - else - echo "Unknown stable version: $1 (valid: 3005, 3006, latest)" - exit 1 - fi - fi - -# If doing old-stable install, check if version specified -elif [ "$ITYPE" = "old-stable" ]; then - if [ "$#" -eq 0 ];then - ITYPE="stable" - else - if [ "$(echo "$1" | grep -E '^(3003|3004|3005)$')" != "" ]; then + if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2|3000|3001|3002|3003|3004)$')" != "" ]; then STABLE_REV="$1" - ITYPE="stable" shift - elif [ "$(echo "$1" | grep -E '^([3-9][0-5]{3}(\.[0-9]*)?)$')" != "" ]; then + elif [ "$(echo "$1" | grep -E '^(2[0-9]*\.[0-9]*\.[0-9]*|[3-9][0-9]{3}(\.[0-9]*)?)$')" != "" ]; then # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix - ITYPE="stable" STABLE_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') if [ "$(uname)" != "Darwin" ]; then STABLE_REV="archive/$STABLE_REV" fi shift else - echo "Unknown old stable version: $1 (valid: 3003, 3004, 3005)" + echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, 3000, 3001, 3002, 3003, 3004, latest, \$MAJOR.\$MINOR.\$PATCH until 2019.2, \$MAJOR or \$MAJOR.\$PATCH starting from 3000)" exit 1 fi fi - -elif [ "$ITYPE" = "onedir" ]; then - 
if [ "$#" -eq 0 ];then - ONEDIR_REV="latest" - else - if [ "$(echo "$1" | grep -E '^(nightly|latest|3005|3006)$')" != "" ]; then - ONEDIR_REV="$1" - shift - elif [ "$(echo "$1" | grep -E '^(3005(\.[0-9]*)?)')" != "" ]; then - # Handle the 3005.0 version as 3005 archive (pin to minor) and strip the fake ".0" suffix - ONEDIR_REV=$(echo "$1" | sed -E 's/^(3005)\.0$/\1/') - ONEDIR_REV="minor/$ONEDIR_REV" - shift - elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}(\.[0-9]*)?)')" != "" ]; then - ONEDIR_REV="minor/$1" - shift - else - echo "Unknown onedir version: $1 (valid: 3005, 3006, latest, nightly.)" - exit 1 - fi - fi - -elif [ "$ITYPE" = "onedir_rc" ]; then - # Change the _ONEDIR_DIR to be the location for the RC packages - _ONEDIR_DIR="salt_rc/salt" - - # Change ITYPE to onedir so we use the regular onedir functions - ITYPE="onedir" - - if [ "$#" -eq 0 ];then - ONEDIR_REV="latest" - else - if [ "$(echo "$1" | grep -E '^(latest)$')" != "" ]; then - ONEDIR_REV="$1" - shift - elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}?rc[0-9]-[0-9]$)')" != "" ]; then - # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix - #ONEDIR_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') - ONEDIR_REV="minor/$1" - shift - elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}\.[0-9]?rc[0-9]$)')" != "" ]; then - # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix - #ONEDIR_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') - ONEDIR_REV="minor/$1" - shift - else - echo "Unknown onedir_rc version: $1 (valid: 3005-1, latest.)" - exit 1 - fi - fi -fi - -# Doing a quick start, so install master -# set master address to 127.0.0.1 -if [ "$_QUICK_START" -eq "$BS_TRUE" ]; then - # make install type is stable - ITYPE="stable" - - # make sure the revision is latest - STABLE_REV="latest" - ONEDIR_REV="latest" - - # make sure we're installing the master - _INSTALL_MASTER=$BS_TRUE - - # override incase install minion - # is set to 
false - _INSTALL_MINION=$BS_TRUE - - # Set master address to loopback IP - _SALT_MASTER_ADDRESS="127.0.0.1" - - # Auto accept the minion key - # when the install is done. - _AUTO_ACCEPT_MINION_KEYS=$BS_TRUE fi # Check for any unparsed arguments. Should be an error. @@ -954,18 +824,6 @@ __fetch_verify() { return 1 } -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __check_url_exists -# DESCRIPTION: Checks if a URL exists -#---------------------------------------------------------------------------------------------------------------------- -__check_url_exists() { - _URL="$1" - if curl --output /dev/null --silent --fail "${_URL}"; then - return 0 - else - return 1 - fi -} #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __gather_hardware_info # DESCRIPTION: Discover hardware information @@ -1087,7 +945,7 @@ __strip_duplicates() { __sort_release_files() { KNOWN_RELEASE_FILES=$(echo "(arch|alpine|centos|debian|ubuntu|fedora|redhat|suse|\ mandrake|mandriva|gentoo|slackware|turbolinux|unitedlinux|void|lsb|system|\ - oracle|os|almalinux|rocky)(-|_)(release|version)" | sed -E 's:[[:space:]]::g') + oracle|os)(-|_)(release|version)" | sed -E 's:[[:space:]]::g') primary_release_files="" secondary_release_files="" # Sort know VS un-known files first @@ -1101,7 +959,7 @@ __sort_release_files() { done # Now let's sort by know files importance, max important goes last in the max_prio list - max_prio="redhat-release centos-release oracle-release fedora-release almalinux-release rocky-release" + max_prio="redhat-release centos-release oracle-release fedora-release" for entry in $max_prio; do if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\2 \\1 \\3:g") @@ -1170,8 +1028,6 @@ __gather_linux_system_info() { 
elif [ "${DISTRO_NAME}" = "Arch" ]; then DISTRO_NAME="Arch Linux" return - elif [ "${DISTRO_NAME}" = "Rocky" ]; then - DISTRO_NAME="Rocky Linux" fi rv=$(lsb_release -sr) [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv") @@ -1230,8 +1086,6 @@ __gather_linux_system_info() { unitedlinux ) n="UnitedLinux" ;; void ) n="VoidLinux" ;; oracle ) n="Oracle Linux" ;; - almalinux ) n="AlmaLinux" ;; - rocky ) n="Rocky Linux" ;; system ) while read -r line; do [ "${n}x" != "systemx" ] && break @@ -1454,7 +1308,7 @@ __gather_system_info() { #---------------------------------------------------------------------------------------------------------------------- # shellcheck disable=SC2034 __ubuntu_derivatives_translation() { - UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon|pop)" + UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon)" # Mappings trisquel_6_ubuntu_base="12.04" linuxmint_13_ubuntu_base="12.04" @@ -1467,8 +1321,6 @@ __ubuntu_derivatives_translation() { neon_16_ubuntu_base="16.04" neon_18_ubuntu_base="18.04" neon_20_ubuntu_base="20.04" - neon_22_ubuntu_base="22.04" - pop_22_ubuntu_base="22.04" # Translate Ubuntu derivatives to their base Ubuntu version match=$(echo "$DISTRO_NAME_L" | grep -E ${UBUNTU_DERIVATIVES}) @@ -1528,13 +1380,9 @@ __check_dpkg_architecture() { if [ "$_CUSTOM_REPO_URL" != "null" ]; then warn_msg="Support for arm64 is experimental, make sure the custom repository used has the expected structure and contents." 
else - # Saltstack official repository has arm64 metadata beginning with Debian 11, - # use amd64 repositories on arm64 for anything older, since all pkgs are arch-independent - if [ "$DISTRO_NAME_L" = "debian" ] && [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then - __REPO_ARCH="amd64" - else - __REPO_ARCH="arm64" - fi + # Saltstack official repository does not yet have arm64 metadata, + # use amd64 repositories on arm64, since all pkgs are arch-independent + __REPO_ARCH="amd64" __REPO_ARCH_DEB="deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg arch=$__REPO_ARCH]" warn_msg="Support for arm64 packages is experimental and might rely on architecture-independent packages from the amd64 repository." fi @@ -1614,9 +1462,6 @@ __ubuntu_codename_translation() { "21") DISTRO_CODENAME="hirsute" ;; - "22") - DISTRO_CODENAME="jammy" - ;; *) DISTRO_CODENAME="trusty" ;; @@ -1643,12 +1488,10 @@ __debian_derivatives_translation() { devuan_1_debian_base="8.0" devuan_2_debian_base="9.0" kali_1_debian_base="7.0" - kali_2021_debian_base="10.0" linuxmint_1_debian_base="8.0" raspbian_8_debian_base="8.0" raspbian_9_debian_base="9.0" raspbian_10_debian_base="10.0" - raspbian_11_debian_base="11.0" bunsenlabs_9_debian_base="9.0" turnkey_9_debian_base="9.0" @@ -1716,14 +1559,6 @@ __debian_codename_translation() { "11") DISTRO_CODENAME="bullseye" ;; - "12") - DISTRO_CODENAME="bookworm" - # FIXME - TEMPORARY - # use bullseye packages until bookworm packages are available - DISTRO_CODENAME="bullseye" - DISTRO_MAJOR_VERSION=11 - rv=11 - ;; *) DISTRO_CODENAME="stretch" ;; @@ -1755,13 +1590,11 @@ __check_end_of_life_versions() { # = 17.04, 17.10 # = 18.10 # = 19.04, 19.10 - # = 20.10 if [ "$DISTRO_MAJOR_VERSION" -lt 16 ] || \ [ "$DISTRO_MAJOR_VERSION" -eq 17 ] || \ [ "$DISTRO_MAJOR_VERSION" -eq 19 ] || \ { [ "$DISTRO_MAJOR_VERSION" -eq 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - { [ 
"$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then + { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then echoerror "End of life distributions are not supported." echoerror "Please consider upgrading to the next stable. See:" echoerror " https://wiki.ubuntu.com/Releases" @@ -1979,14 +1812,14 @@ elif [ "${DISTRO_NAME_L}" = "debian" ]; then __debian_codename_translation fi -if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|gentoo|red_hat|oracle|scientific|amazon|fedora|macosx|almalinux|rocky)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then +if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|gentoo|red_hat|oracle|scientific|amazon|fedora|macosx)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then echoerror "${DISTRO_NAME} does not have major version pegged packages support" exit 1 fi # Only RedHat based distros have testing support if [ "${ITYPE}" = "testing" ]; then - if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle|almalinux|rocky)')" = "" ]; then + if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle)')" = "" ]; then echoerror "${DISTRO_NAME} does not have testing packages support" exit 1 fi @@ -2017,6 +1850,10 @@ if [ "$ITYPE" = "git" ]; then if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then _POST_NEON_INSTALL=$BS_TRUE __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" + if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then + # We do this to properly clone tags + GIT_REV="v${GIT_REV}" + fi echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" else __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?[0-9]{1,4}\.[0-9]{1,2})(\.[0-9]{1,2})?.*$/MATCH/') @@ -2028,6 +1865,10 @@ if [ "$ITYPE" = "git" ]; then if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then _POST_NEON_INSTALL=$BS_TRUE __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" + if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then + 
# We do this to properly clone tags + GIT_REV="v${GIT_REV}" + fi echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" else __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?[[:digit:]]\{1,4\}\.[[:digit:]]\{1,2\}\)\(\.[[:digit:]]\{1,2\}\)\?.*$/MATCH/') @@ -2190,13 +2031,20 @@ __rpm_import_gpg() { #---------------------------------------------------------------------------------------------------------------------- __yum_install_noinput() { + ENABLE_EPEL_CMD="" + # Skip Amazon Linux for the first round, since EPEL is no longer required. + # See issue #724 + if [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DISTRO_NAME_L" != "amazon_linux_ami" ]; then + ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" + fi + if [ "$DISTRO_NAME_L" = "oracle_linux" ]; then # We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!! for package in "${@}"; do - yum -y install "${package}" || yum -y install "${package}" || return $? + yum -y install "${package}" || yum -y install "${package}" ${ENABLE_EPEL_CMD} || return $? done else - yum -y install "${@}" || return $? + yum -y install "${@}" ${ENABLE_EPEL_CMD} || return $? fi } # ---------- end of function __yum_install_noinput ---------- @@ -2209,15 +2057,6 @@ __dnf_install_noinput() { dnf -y install "${@}" || return $? } # ---------- end of function __dnf_install_noinput ---------- -#--- FUNCTION ------------------------------------------------------------------------------------------------------- -# NAME: __tdnf_install_noinput -# DESCRIPTION: (DRY) tdnf install with noinput options -#---------------------------------------------------------------------------------------------------------------------- -__tdnf_install_noinput() { - - tdnf -y install "${@}" || return $? 
-} # ---------- end of function __tdnf_install_noinput ---------- - #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __git_clone_and_checkout # DESCRIPTION: (DRY) Helper function to clone and checkout salt to a @@ -2743,7 +2582,7 @@ __activate_virtualenv() { # NAME: __install_pip_pkgs # DESCRIPTION: Return 0 or 1 if successfully able to install pip packages. Can provide a different python version to # install pip packages with. If $py_ver is not specified it will use the default python version. -# PARAMETERS: pkgs, py_ver, upgrade +# PARAMETERS: pkgs, py_ver #---------------------------------------------------------------------------------------------------------------------- __install_pip_pkgs() { @@ -2912,15 +2751,15 @@ EOM fi echodebug "Running '${_pip_cmd} install wheel ${_setuptools_dep}'" - ${_pip_cmd} install --upgrade ${_POST_NEON_PIP_INSTALL_ARGS} wheel "${_setuptools_dep}" + ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} wheel "${_setuptools_dep}" echoinfo "Installing salt using ${_py_exe}" cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 mkdir /tmp/git/deps echoinfo "Downloading Salt Dependencies from PyPi" - echodebug "Running '${_pip_cmd} download -d /tmp/git/deps ${_PIP_DOWNLOAD_ARGS} .'" - ${_pip_cmd} download -d /tmp/git/deps ${_PIP_DOWNLOAD_ARGS} . || (echo "Failed to download salt dependencies" && return 1) + echodebug "Running '${_pip_cmd} download -d /tmp/git/deps .'" + ${_pip_cmd} download -d /tmp/git/deps . 
|| (echo "Failed to download salt dependencies" && return 1) echoinfo "Installing Downloaded Salt Dependencies" echodebug "Running '${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/*'" @@ -3079,8 +2918,7 @@ __enable_universe_repository() { __install_saltstack_ubuntu_repository() { # Workaround for latest non-LTS Ubuntu if { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - # remove 22 version when salt packages for 22.04 are available - [ "$DISTRO_MAJOR_VERSION" -eq 21 ] || [ "$DISTRO_MAJOR_VERSION" -eq 22 ]; then + { [ "$DISTRO_MAJOR_VERSION" -eq 21 ] && [ "$DISTRO_MINOR_VERSION" -eq 04 ]; }; then echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." UBUNTU_VERSION=20.04 UBUNTU_CODENAME="focal" @@ -3119,58 +2957,6 @@ __install_saltstack_ubuntu_repository() { __wait_for_apt apt-get update || return 1 } -__install_saltstack_ubuntu_onedir_repository() { - # Workaround for latest non-LTS Ubuntu - if { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - [ "$DISTRO_MAJOR_VERSION" -eq 21 ]; then - echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." 
- UBUNTU_VERSION=20.04 - UBUNTU_CODENAME="focal" - else - UBUNTU_VERSION=${DISTRO_VERSION} - UBUNTU_CODENAME=${DISTRO_CODENAME} - fi - - # Install downloader backend for GPG keys fetching - __PACKAGES='wget' - - # Required as it is not installed by default on Ubuntu 18+ - if [ "$DISTRO_MAJOR_VERSION" -ge 18 ]; then - __PACKAGES="${__PACKAGES} gnupg" - fi - - # Make sure https transport is available - if [ "$HTTP_VAL" = "https" ] ; then - __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" - fi - - # shellcheck disable=SC2086,SC2090 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - __PY_VERSION_REPO="apt" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - # SaltStack's stable Ubuntu repository: - SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${ONEDIR_REV}/" - if [ "${ONEDIR_REV}" = "nightly" ] ; then - SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/" - fi - echo "$__REPO_ARCH_DEB $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/salt.list - - if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ]; then - __apt_key_fetch "${SALTSTACK_UBUNTU_URL}salt-archive-keyring.gpg" || return 1 - elif [ "$(echo "${ONEDIR_REV}" | grep -E '(latest|nightly)')" != "" ]; then - __apt_key_fetch "${SALTSTACK_UBUNTU_URL}salt-archive-keyring.gpg" || \ - __apt_key_fetch "${SALTSTACK_UBUNTU_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 - else - __apt_key_fetch "${SALTSTACK_UBUNTU_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 - fi - - __wait_for_apt apt-get update || return 1 -} - install_ubuntu_deps() { if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then # Install add-apt-repository @@ -3246,7 +3032,7 @@ install_ubuntu_stable_deps() { if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - if [ 
"$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ] || [ "$DISTRO_MAJOR_VERSION" -ge 22 ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ]; then __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 else __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && @@ -3327,9 +3113,6 @@ install_ubuntu_git_deps() { fi else __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" - if [ "$DISTRO_MAJOR_VERSION" -ge 22 ]; then - __PACKAGES="${__PACKAGES} g++" - fi # shellcheck disable=SC2086 __apt_get_install_noinput ${__PACKAGES} || return 1 fi @@ -3343,44 +3126,6 @@ install_ubuntu_git_deps() { return 0 } -install_ubuntu_onedir_deps() { - if [ "${_SLEEP}" -eq "${__DEFAULT_SLEEP}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then - # The user did not pass a custom sleep value as an argument, let's increase the default value - echodebug "On Ubuntu systems we increase the default sleep value to 10." - echodebug "See https://github.com/saltstack/salt/issues/12248 for more info." - _SLEEP=10 - fi - - if [ $_START_DAEMONS -eq $BS_FALSE ]; then - echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." 
- fi - - # No user interaction, libc6 restart services for example - export DEBIAN_FRONTEND=noninteractive - - __wait_for_apt apt-get update || return 1 - - if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then - if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ]; then - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 - else - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && - apt-key update && apt-get update || return 1 - fi - fi - - __apt_get_upgrade_noinput || return 1 - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then - __check_dpkg_architecture || return 1 - __install_saltstack_ubuntu_onedir_repository || return 1 - fi - - install_ubuntu_deps || return 1 -} - install_ubuntu_stable() { __PACKAGES="" @@ -3425,15 +3170,7 @@ install_ubuntu_git() { _POST_NEON_PIP_INSTALL_ARGS="" __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - - # Account for new path for services files in later releases - if [ -d "pkg/common" ]; then - _SERVICE_DIR="pkg/common" - else - _SERVICE_DIR="pkg" - fi - - sed -i 's:/usr/bin:/usr/local/bin:g' ${_SERVICE_DIR}/*.service + sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service return 0 fi @@ -3448,28 +3185,6 @@ install_ubuntu_git() { return 0 } -install_ubuntu_onedir() { - __PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-syndic" - fi - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - install_ubuntu_stable_post() { for fname in api master 
minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -3505,15 +3220,8 @@ install_ubuntu_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - # Account for new path for services files in later releases - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" - else - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" - fi - if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -3528,8 +3236,8 @@ install_ubuntu_git_post() { if [ ! 
-f $_upstart_conf ]; then # upstart does not know about our service, let's copy the proper file echowarn "Upstart does not appear to know about salt-$fname" - echodebug "Copying ${_SERVICE_DIR}/salt-$fname.upstart to $_upstart_conf" - __copyfile "${_SERVICE_DIR}/salt-${fname}.upstart" "$_upstart_conf" + echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart to $_upstart_conf" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.upstart" "$_upstart_conf" # Set service to know about virtualenv if [ "${_VIRTUALENV_DIR}" != "null" ]; then echo "SALT_USE_VIRTUALENV=${_VIRTUALENV_DIR}" > /etc/default/salt-${fname} @@ -3641,8 +3349,17 @@ install_ubuntu_check_services() { # Debian Install Functions # __install_saltstack_debian_repository() { - DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" - DEBIAN_CODENAME="$DISTRO_CODENAME" + if [ "$DISTRO_MAJOR_VERSION" -eq 11 ]; then + # Packages for Debian 11 at repo.saltproject.io are not yet available + # Set up repository for Debian 10 for Debian 11 for now until support + # is available at repo.saltproject.io for Debian 11. + echowarn "Debian 11 distribution detected, but stable packages requested. Trying packages from Debian 10. You may experience problems." 
+ DEBIAN_RELEASE="10" + DEBIAN_CODENAME="buster" + else + DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" + DEBIAN_CODENAME="$DISTRO_CODENAME" + fi __PY_VERSION_REPO="apt" if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then @@ -3674,50 +3391,6 @@ __install_saltstack_debian_repository() { __wait_for_apt apt-get update || return 1 } -__install_saltstack_debian_onedir_repository() { - DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" - DEBIAN_CODENAME="$DISTRO_CODENAME" - - __PY_VERSION_REPO="apt" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - # Install downloader backend for GPG keys fetching - __PACKAGES='wget' - - # Required as it is not installed by default on Debian 9+ - if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then - __PACKAGES="${__PACKAGES} gnupg2" - fi - - # Make sure https transport is available - if [ "$HTTP_VAL" = "https" ] ; then - __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" - fi - - # shellcheck disable=SC2086,SC2090 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location - SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${ONEDIR_REV}/" - if [ "${ONEDIR_REV}" = "nightly" ] ; then - SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/" - fi - echo "$__REPO_ARCH_DEB $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/salt.list" - - if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ]; then - __apt_key_fetch "${SALTSTACK_DEBIAN_URL}salt-archive-keyring.gpg" || return 1 - elif [ "$(echo "${ONEDIR_REV}" | grep -E '(latest|nightly)')" != "" ]; then - __apt_key_fetch "${SALTSTACK_DEBIAN_URL}salt-archive-keyring.gpg" || \ - __apt_key_fetch "${SALTSTACK_DEBIAN_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 - else - __apt_key_fetch 
"${SALTSTACK_DEBIAN_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 - fi - - __wait_for_apt apt-get update || return 1 -} - install_debian_deps() { if [ $_START_DAEMONS -eq $BS_FALSE ]; then echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." @@ -3771,59 +3444,6 @@ install_debian_deps() { return 0 } -install_debian_onedir_deps() { - if [ $_START_DAEMONS -eq $BS_FALSE ]; then - echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." - fi - - # No user interaction, libc6 restart services for example - export DEBIAN_FRONTEND=noninteractive - - __wait_for_apt apt-get update || return 1 - - if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then - # Try to update GPG keys first if allowed - if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - if [ "$DISTRO_MAJOR_VERSION" -ge 10 ]; then - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 - else - __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && - apt-key update && apt-get update || return 1 - fi - fi - - __apt_get_upgrade_noinput || return 1 - fi - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - PY_PKG_VER=3 - else - PY_PKG_VER="" - fi - - # Additionally install procps and pciutils which allows for Docker bootstraps. 
See 366#issuecomment-39666813 - __PACKAGES='procps pciutils' - - # YAML module is used for generating custom master/minion configs - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then - __check_dpkg_architecture || return 1 - __install_saltstack_debian_onedir_repository || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 -} - install_debian_git_pre() { if ! __check_command_exists git; then __apt_get_install_noinput git || return 1 @@ -4072,15 +3692,7 @@ install_debian_git() { _POST_NEON_PIP_INSTALL_ARGS="" __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - - # Account for new path for services files in later releases - if [ -d "pkg/common" ]; then - _SERVICE_DIR="pkg/common" - else - _SERVICE_DIR="pkg" - fi - - sed -i 's:/usr/bin:/usr/local/bin:g' ${_SERVICE_DIR}/*.service + sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service return 0 fi @@ -4108,28 +3720,6 @@ install_debian_9_git() { return 0 } -install_debian_onedir() { - __PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-syndic" - fi - - # shellcheck disable=SC2086 - __apt_get_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - install_debian_git_post() { for fname in api master minion syndic; do # Skip if not meant to be installed @@ -4139,23 +3729,16 @@ 
install_debian_git_post() { [ "$fname" = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ "$fname" = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - # Account for new path for services files in later releases - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" - else - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" - fi - # Configure SystemD for Debian 8 "Jessie" and later if [ -f /bin/systemctl ]; then if [ ! -f /lib/systemd/system/salt-${fname}.service ] || \ { [ -f /lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]; }; then - if [ -f "${_SERVICE_DIR}/salt-${fname}.service" ]; then - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" /lib/systemd/system - __copyfile "${_SERVICE_DIR}/salt-${fname}.environment" "/etc/default/salt-${fname}" + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" ]; then + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}" else # workaround before adding Debian-specific unit files to the Salt main repo - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system sed -i -e '/^Type/ s/notify/simple/' /lib/systemd/system/salt-${fname}.service fi fi @@ -4187,13 +3770,6 @@ install_debian_git_post() { done } -install_debian_2021_post() { - # Kali 2021 (debian derivative) disables all network services by default - # Using archlinux post function to enable salt systemd services - install_arch_linux_post || return 1 - return 0 -} - install_debian_restart_daemons() { [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 @@ -4250,41 +3826,6 @@ install_debian_check_services() { # Fedora Install Functions # -__install_saltstack_fedora_onedir_repository() { - if [ "$ITYPE" = 
"stable" ]; then - REPO_REV="$ONEDIR_REV" - else - REPO_REV="latest" - fi - - __PY_VERSION_REPO="yum" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - GPG_KEY="SALT-PROJECT-GPG-PUBKEY-2023.pub" - - REPO_FILE="/etc/yum.repos.d/salt.repo" - - if [ ! -s "$REPO_FILE" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then - FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/fedora/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}" - if [ "${ONEDIR_REV}" = "nightly" ] ; then - FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/fedora/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" - fi - - __fetch_url "${REPO_FILE}" "${FETCH_URL}.repo" - - __rpm_import_gpg "${FETCH_URL}/${GPG_KEY}" || return 1 - - yum clean metadata || return 1 - elif [ "$REPO_REV" != "latest" ]; then - echowarn "salt.repo already exists, ignoring salt version argument." - echowarn "Use -F (forced overwrite) to install $REPO_REV." - fi - - return 0 -} - install_fedora_deps() { if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then dnf -y update || return 1 @@ -4444,9 +3985,6 @@ install_fedora_git_deps() { done else __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" - if [ "${DISTRO_VERSION}" -ge 35 ]; then - __PACKAGES="${__PACKAGES} gcc-c++" - fi # shellcheck disable=SC2086 __dnf_install_noinput ${__PACKAGES} || return 1 fi @@ -4490,18 +4028,7 @@ install_fedora_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - # Account for new path for services files in later releases - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" - else - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" - fi - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" - - # Salt 
executables are located under `/usr/local/bin/` on Fedora 36+ - #if [ "${DISTRO_VERSION}" -ge 36 ]; then - # sed -i -e 's:/usr/bin/:/usr/local/bin/:g' /lib/systemd/system/salt-*.service - #fi + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -4549,83 +4076,6 @@ install_fedora_check_services() { return 0 } - -install_fedora_onedir_deps() { - - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - yum -y update || return 1 - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - echowarn "Detected -r or -R option while installing Salt packages for Python 3." - echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." - echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then - __install_saltstack_fedora_onedir_repository || return 1 - fi - - # If -R was passed, we need to configure custom repo url with rsync-ed packages - # Which is still handled in __install_saltstack_rhel_repository. This call has - # its own check in case -r was passed without -R. 
- if [ "$_CUSTOM_REPO_URL" != "null" ]; then - __install_saltstack_fedora_onedir_repository || return 1 - fi - - if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then - __PACKAGES="dnf-utils chkconfig" - else - __PACKAGES="yum-utils chkconfig" - fi - - __PACKAGES="${__PACKAGES} procps" - - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 - -} - - -install_fedora_onedir() { - STABLE_REV=$ONEDIR_REV - #install_fedora_stable || return 1 - - __PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-syndic" - fi - - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - -install_fedora_onedir_post() { - STABLE_REV=$ONEDIR_REV - install_fedora_stable_post || return 1 - - return 0 -} # # Ended Fedora Install Functions # @@ -4635,13 +4085,27 @@ install_fedora_onedir_post() { # # CentOS Install Functions # -__install_saltstack_rhel_repository() { - if [ "${DISTRO_MAJOR_VERSION}" -ge 9 ]; then - echoerror "Old stable repository unavailable on RH variants greater than or equal to 9" - echoerror "Use the stable install type." 
- exit 1 - fi +__install_epel_repository() { + if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_TRUE ]; then + return 0 + fi + # Check if epel repo is already enabled and flag it accordingly + if yum repolist | grep -q "^[!]\\?${_EPEL_REPO}/"; then + _EPEL_REPOS_INSTALLED=$BS_TRUE + return 0 + fi + + # Download latest 'epel-release' package for the distro version directly + epel_repo_url="${HTTP_VAL}://dl.fedoraproject.org/pub/epel/epel-release-latest-${DISTRO_MAJOR_VERSION}.noarch.rpm" + rpm -Uvh --force "$epel_repo_url" || return 1 + + _EPEL_REPOS_INSTALLED=$BS_TRUE + + return 0 +} + +__install_saltstack_rhel_repository() { if [ "$ITYPE" = "stable" ]; then repo_rev="$STABLE_REV" else @@ -4656,19 +4120,7 @@ __install_saltstack_rhel_repository() { # Avoid using '$releasever' variable for yum. # Instead, this should work correctly on all RHEL variants. base_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${repo_rev}/" - if [ "${DISTRO_MAJOR_VERSION}" -eq 7 ]; then - gpg_key="SALTSTACK-GPG-KEY.pub base/RPM-GPG-KEY-CentOS-7" - elif [ "${DISTRO_MAJOR_VERSION}" -ge 9 ]; then - gpg_key="SALTSTACK-GPG-KEY2.pub" - else - gpg_key="SALTSTACK-GPG-KEY.pub" - fi - - gpg_key_urls="" - for key in $gpg_key; do - gpg_key_urls=$(printf "${base_url}${key},%s" "$gpg_key_urls") - done - + gpg_key="SALTSTACK-GPG-KEY.pub" repo_file="/etc/yum.repos.d/salt.repo" if [ ! 
-s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then @@ -4678,80 +4130,13 @@ name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever baseurl=${base_url} skip_if_unavailable=True gpgcheck=1 -gpgkey=${gpg_key_urls} +gpgkey=${base_url}${gpg_key} enabled=1 enabled_metadata=1 _eof fetch_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/" - for key in $gpg_key; do - __rpm_import_gpg "${fetch_url}${key}" || return 1 - done - - yum clean metadata || return 1 - elif [ "$repo_rev" != "latest" ]; then - echowarn "salt.repo already exists, ignoring salt version argument." - echowarn "Use -F (forced overwrite) to install $repo_rev." - fi - - return 0 -} - -__install_saltstack_rhel_onedir_repository() { - if [ "$ITYPE" = "stable" ]; then - repo_rev="$ONEDIR_REV" - else - repo_rev="latest" - fi - - __PY_VERSION_REPO="yum" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - # Avoid using '$releasever' variable for yum. - # Instead, this should work correctly on all RHEL variants. - base_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${ONEDIR_REV}/" - if [ "${ONEDIR_REV}" = "nightly" ] ; then - base_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/" - fi - if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ] || [ "${ONEDIR_REV}" = "nightly" ]; then - if [ "${DISTRO_MAJOR_VERSION}" -eq 9 ]; then - gpg_key="SALTSTACK-GPG-KEY2.pub" - else - gpg_key="SALTSTACK-GPG-KEY.pub" - fi - else - gpg_key="SALT-PROJECT-GPG-PUBKEY-2023.pub" - fi - - gpg_key_urls="" - for key in $gpg_key; do - gpg_key_urls=$(printf "${base_url}${key},%s" "$gpg_key_urls") - done - - repo_file="/etc/yum.repos.d/salt.repo" - - if [ ! 
-s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then - cat <<_eof > "$repo_file" -[saltstack] -name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever -baseurl=${base_url} -skip_if_unavailable=True -gpgcheck=1 -gpgkey=${gpg_key_urls} -enabled=1 -enabled_metadata=1 -_eof - - fetch_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}/" - if [ "${ONEDIR_REV}" = "nightly" ] ; then - fetch_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" - fi - for key in $gpg_key; do - __rpm_import_gpg "${fetch_url}${key}" || return 1 - done - + __rpm_import_gpg "${fetch_url}${gpg_key}" || return 1 yum clean metadata || return 1 elif [ "$repo_rev" != "latest" ]; then echowarn "salt.repo already exists, ignoring salt version argument." @@ -4773,6 +4158,7 @@ install_centos_stable_deps() { fi if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_epel_repository || return 1 __install_saltstack_rhel_repository || return 1 fi @@ -4793,29 +4179,27 @@ install_centos_stable_deps() { if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python3-pyyaml python3-setuptools" + __PACKAGES="${__PACKAGES} python3-pyyaml" else __PACKAGES="${__PACKAGES} python2-pyyaml" fi elif [ "$DISTRO_MAJOR_VERSION" -eq 7 ]; then # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python36-PyYAML python36-setuptools" + __PACKAGES="${__PACKAGES} python36-PyYAML" else __PACKAGES="${__PACKAGES} PyYAML" fi else # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python34-PyYAML python34-setuptools" + 
__PACKAGES="${__PACKAGES} python34-PyYAML" else __PACKAGES="${__PACKAGES} PyYAML" fi fi fi - __PACKAGES="${__PACKAGES} procps" - # shellcheck disable=SC2086 __yum_install_noinput ${__PACKAGES} || return 1 @@ -4832,29 +4216,40 @@ install_centos_stable_deps() { install_centos_stable() { __PACKAGES="" + local cloud='salt-cloud' + local master='salt-master' + local minion='salt-minion' + local syndic='salt-syndic' + + if echo "$STABLE_REV" | grep -q "archive";then # point release being applied + local ver=$(echo "$STABLE_REV"|awk -F/ '{print $2}') # strip archive/ + elif echo "$STABLE_REV" | egrep -vq "archive|latest";then # latest or major version(3003, 3004, etc) being applie + local ver=$STABLE_REV + fi + + if [ ! -z $ver ]; then + cloud+="-$ver" + master+="-$ver" + minion+="-$ver" + syndic+="-$ver" + fi + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" + __PACKAGES="${__PACKAGES} $cloud" fi if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-master" + __PACKAGES="${__PACKAGES} $master" fi if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" + __PACKAGES="${__PACKAGES} $minion" fi if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-syndic" + __PACKAGES="${__PACKAGES} $syndic" fi # shellcheck disable=SC2086 __yum_install_noinput ${__PACKAGES} || return 1 - # Workaround for 3.11 broken on CentOS Stream 8.x - # Re-install Python 3.6 - _py_version=$(${_PY_EXE} -c "import sys; print('{0}.{1}'.format(*sys.version_info))") - if [ "$DISTRO_MAJOR_VERSION" -eq 8 ] && [ "${_py_version}" = "3.11" ]; then - __yum_install_noinput python3 - fi - return 0 } @@ -4890,14 +4285,7 @@ install_centos_stable_post() { } install_centos_git_deps() { - # First try stable deps then fall back to onedir deps if that one fails - # if we're installing on a Red Hat based host that doesn't have the classic - # package repos available. 
- # Set ONEDIR_REV to STABLE_REV in case we - # end up calling install_centos_onedir_deps - ONEDIR_REV=${STABLE_REV} - install_centos_onedir_deps || \ - return 1 + install_centos_stable_deps || return 1 if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then __yum_install_noinput ca-certificates || return 1 @@ -5057,16 +4445,10 @@ install_centos_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - # Account for new path for services files in later releases - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then - _SERVICE_FILE="${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" - else - _SERVICE_FILE="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" - fi if [ -f /bin/systemctl ]; then if [ ! -f "/usr/lib/systemd/system/salt-${fname}.service" ] || \ { [ -f "/usr/lib/systemd/system/salt-${fname}.service" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then - __copyfile "${_SERVICE_FILE}" /usr/lib/systemd/system + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system fi SYSTEMD_RELOAD=$BS_TRUE @@ -5086,117 +4468,6 @@ install_centos_git_post() { return 0 } -install_centos_onedir_deps() { - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - yum -y update || return 1 - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - echowarn "Detected -r or -R option while installing Salt packages for Python 3." - echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." - echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." 
- fi - - if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then - __install_saltstack_rhel_onedir_repository || return 1 - fi - - # If -R was passed, we need to configure custom repo url with rsync-ed packages - # Which is still handled in __install_saltstack_rhel_repository. This call has - # its own check in case -r was passed without -R. - if [ "$_CUSTOM_REPO_URL" != "null" ]; then - __install_saltstack_rhel_onedir_repository || return 1 - fi - - if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then - __PACKAGES="dnf-utils chkconfig" - else - __PACKAGES="yum-utils chkconfig" - fi - - __PACKAGES="${__PACKAGES} procps" - - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi - - - return 0 -} - -# This function has been modified to allow for specific versions to be installed -# when not using the salt repo -install_centos_onedir() { - __PACKAGES="" - - local cloud='salt-cloud' - local master='salt-master' - local minion='salt-minion' - local syndic='salt-syndic' - local ver="$_ONEDIR_REV" - - if [ ! 
-z $ver ]; then - cloud+="-$ver" - master+="-$ver" - minion+="-$ver" - syndic+="-$ver" - fi - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} $minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $syndic" - fi - - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - -install_centos_onedir_post() { - SYSTEMD_RELOAD=$BS_FALSE - - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f /bin/systemctl ]; then - /bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( - /bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && - /bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 - ) - - SYSTEMD_RELOAD=$BS_TRUE - elif [ -f "/etc/init.d/salt-${fname}" ]; then - /sbin/chkconfig salt-${fname} on - fi - done - - if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then - /bin/systemctl daemon-reload - fi - - return 0 -} - install_centos_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -5296,11 +4567,6 @@ install_red_hat_linux_git_deps() { return 0 } -install_red_hat_linux_onedir_deps() { - install_centos_onedir_deps || return 1 - return 0 -} - install_red_hat_enterprise_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -5311,11 +4577,6 @@ install_red_hat_enterprise_git_deps() { return 0 } -install_red_hat_enterprise_onedir_deps() { - 
install_red_hat_linux_onedir_deps || return 1 - return 0 -} - install_red_hat_enterprise_linux_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -5326,11 +4587,6 @@ install_red_hat_enterprise_linux_git_deps() { return 0 } -install_red_hat_enterprise_linux_onedir_deps() { - install_red_hat_linux_onedir_deps || return 1 - return 0 -} - install_red_hat_enterprise_server_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -5341,11 +4597,6 @@ install_red_hat_enterprise_server_git_deps() { return 0 } -install_red_hat_enterprise_server_onedir_deps() { - install_red_hat_linux_onedir_deps || return 1 - return 0 -} - install_red_hat_enterprise_workstation_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -5356,11 +4607,6 @@ install_red_hat_enterprise_workstation_git_deps() { return 0 } -install_red_hat_enterprise_workstation_onedir_deps() { - install_red_hat_linux_timat_deps || return 1 - return 0 -} - install_red_hat_linux_stable() { install_centos_stable || return 1 return 0 @@ -5371,11 +4617,6 @@ install_red_hat_linux_git() { return 0 } -install_red_hat_linux_onedir() { - install_centos_onedir || return 1 - return 0 -} - install_red_hat_enterprise_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -5386,11 +4627,6 @@ install_red_hat_enterprise_git() { return 0 } -install_red_hat_enterprise_onedir() { - install_red_hat_linux_onedir || return 1 - return 0 -} - install_red_hat_enterprise_linux_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -5401,11 +4637,6 @@ install_red_hat_enterprise_linux_git() { return 0 } -install_red_hat_enterprise_linux_onedir() { - install_red_hat_linux_onedir || return 1 - return 0 -} - install_red_hat_enterprise_server_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -5416,11 +4647,6 @@ install_red_hat_enterprise_server_git() { return 0 } -install_red_hat_enterprise_server_onedir() { - install_red_hat_linux_onedir || return 1 - return 
0 -} - install_red_hat_enterprise_workstation_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -5431,11 +4657,6 @@ install_red_hat_enterprise_workstation_git() { return 0 } -install_red_hat_enterprise_workstation_onedir() { - install_red_hat_linux_onedir || return 1 - return 0 -} - install_red_hat_linux_stable_post() { install_centos_stable_post || return 1 return 0 @@ -5580,15 +4801,6 @@ install_red_hat_enterprise_workstation_testing_post() { # Oracle Linux Install Functions # install_oracle_linux_stable_deps() { - # Install Oracle's EPEL. - if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_FALSE ]; then - _EPEL_REPO=oracle-epel-release-el${DISTRO_MAJOR_VERSION} - if ! rpm -q "${_EPEL_REPO}" > /dev/null; then - __yum_install_noinput "${_EPEL_REPO}" - fi - _EPEL_REPOS_INSTALLED=$BS_TRUE - fi - install_centos_stable_deps || return 1 return 0 } @@ -5598,11 +4810,6 @@ install_oracle_linux_git_deps() { return 0 } -install_oracle_linux_onedir_deps() { - install_centos_onedir_deps || return 1 - return 0 -} - install_oracle_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -5618,11 +4825,6 @@ install_oracle_linux_git() { return 0 } -install_oracle_linux_onedir() { - install_centos_onedir || return 1 - return 0 -} - install_oracle_linux_testing() { install_centos_testing || return 1 return 0 @@ -5638,11 +4840,6 @@ install_oracle_linux_git_post() { return 0 } -install_oracle_linux_onedir_post() { - install_centos_onedir_post || return 1 - return 0 -} - install_oracle_linux_testing_post() { install_centos_testing_post || return 1 return 0 @@ -5662,162 +4859,6 @@ install_oracle_linux_check_services() { # ####################################################################################################################### -####################################################################################################################### -# -# AlmaLinux Install Functions -# -install_almalinux_stable_deps() { - install_centos_stable_deps || 
return 1 - return 0 -} - -install_almalinux_git_deps() { - install_centos_git_deps || return 1 - return 0 -} - -install_almalinux_onedir_deps() { - install_centos_onedir_deps || return 1 - return 0 -} - -install_almalinux_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_almalinux_stable() { - install_centos_stable || return 1 - return 0 -} - -install_almalinux_git() { - install_centos_git || return 1 - return 0 -} - -install_almalinux_onedir() { - install_centos_onedir || return 1 - return 0 -} - -install_almalinux_testing() { - install_centos_testing || return 1 - return 0 -} - -install_almalinux_stable_post() { - install_centos_stable_post || return 1 - return 0 -} - -install_almalinux_git_post() { - install_centos_git_post || return 1 - return 0 -} - -install_almalinux_onedir_post() { - install_centos_onedir_post || return 1 - return 0 -} - -install_almalinux_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_almalinux_restart_daemons() { - install_centos_restart_daemons || return 1 - return 0 -} - -install_almalinux_check_services() { - install_centos_check_services || return 1 - return 0 -} -# -# Ended AlmaLinux Install Functions -# -####################################################################################################################### - -####################################################################################################################### -# -# Rocky Linux Install Functions -# -install_rocky_linux_stable_deps() { - install_centos_stable_deps || return 1 - return 0 -} - -install_rocky_linux_git_deps() { - install_centos_git_deps || return 1 - return 0 -} - -install_rocky_linux_onedir_deps() { - install_centos_onedir_deps || return 1 - return 0 -} - -install_rocky_linux_testing_deps() { - install_centos_testing_deps || return 1 - return 0 -} - -install_rocky_linux_stable() { - install_centos_stable || return 1 - return 0 -} - -install_rocky_linux_onedir() { - 
install_centos_onedir || return 1 - return 0 -} - -install_rocky_linux_git() { - install_centos_git || return 1 - return 0 -} - -install_rocky_linux_testing() { - install_centos_testing || return 1 - return 0 -} - -install_rocky_linux_stable_post() { - install_centos_stable_post || return 1 - return 0 -} - -install_rocky_linux_git_post() { - install_centos_git_post || return 1 - return 0 -} - -install_rocky_linux_onedir_post() { - install_centos_onedir_post || return 1 - return 0 -} - -install_rocky_linux_testing_post() { - install_centos_testing_post || return 1 - return 0 -} - -install_rocky_linux_restart_daemons() { - install_centos_restart_daemons || return 1 - return 0 -} - -install_rocky_linux_check_services() { - install_centos_check_services || return 1 - return 0 -} -# -# Ended Rocky Linux Install Functions -# -####################################################################################################################### - ####################################################################################################################### # # Scientific Linux Install Functions @@ -5832,11 +4873,6 @@ install_scientific_linux_git_deps() { return 0 } -install_scientific_linux_onedir_deps() { - install_centos_onedir_deps || return 1 - return 0 -} - install_scientific_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -5852,11 +4888,6 @@ install_scientific_linux_git() { return 0 } -install_scientific_linux_onedir() { - install_centos_onedir || return 1 - return 0 -} - install_scientific_linux_testing() { install_centos_testing || return 1 return 0 @@ -5872,11 +4903,6 @@ install_scientific_linux_git_post() { return 0 } -install_scientific_linux_onedir_post() { - install_centos_onedir_post || return 1 - return 0 -} - install_scientific_linux_testing_post() { install_centos_testing_post || return 1 return 0 @@ -5910,11 +4936,6 @@ install_cloud_linux_git_deps() { return 0 } -install_cloud_linux_onedir_deps() { - 
install_centos_onedir_deps || return 1 - return 0 -} - install_cloud_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -6008,8 +5029,8 @@ install_alpine_linux_git_deps() { fi fi else - apk -U add python3 python3-dev py3-pip py3-setuptools g++ linux-headers zeromq-dev openrc || return 1 - _PY_EXE=python3 + apk -U add python2 py2-pip py2-setuptools || return 1 + _PY_EXE=python2 return 0 fi @@ -6479,100 +5500,6 @@ _eof fi } -install_amazon_linux_ami_2_onedir_deps() { - # Shim to figure out if we're using old (rhel) or new (aws) rpms. - _USEAWS=$BS_FALSE - pkg_append="python" - - if [ "$ITYPE" = "onedir" ]; then - repo_rev="$ONEDIR_REV" - else - repo_rev="latest" - fi - - if echo $repo_rev | grep -E -q '^archive'; then - year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) - else - year=$(echo "$repo_rev" | cut -c1-4) - fi - - # We need to install yum-utils before doing anything else when installing on - # Amazon Linux ECS-optimized images. See issue #974. - __yum_install_noinput yum-utils - - # Do upgrade early - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - yum -y update || return 1 - fi - - if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then - __REPO_FILENAME="salt.repo" - __PY_VERSION_REPO="yum" - PY_PKG_VER="" - repo_label="saltstack-repo" - repo_name="SaltStack repo for Amazon Linux 2" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __REPO_FILENAME="salt.repo" - __PY_VERSION_REPO="py3" - PY_PKG_VER=3 - repo_label="saltstack-py3-repo" - repo_name="SaltStack Python 3 repo for Amazon Linux 2" - fi - - base_url="$HTTP_VAL://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/amazon/2/\$basearch/$repo_rev/" - if [ "${ONEDIR_REV}" = "nightly" ] ; then - base_url="$HTTP_VAL://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/amazon/2/\$basearch/" - fi - - if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ] || [ "${ONEDIR_REV}" = "nightly" ]; then - 
gpg_key="${base_url}SALTSTACK-GPG-KEY.pub,${base_url}base/RPM-GPG-KEY-CentOS-7" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" - fi - else - gpg_key="${base_url}SALT-PROJECT-GPG-PUBKEY-2023.pub" - fi - - # This should prob be refactored to use __install_saltstack_rhel_repository() - # With args passed in to do the right thing. Reformatted to be more like the - # amazon linux yum file. - if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then - cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" -[$repo_label] -name=$repo_name -failovermethod=priority -priority=10 -gpgcheck=1 -gpgkey=$gpg_key -baseurl=$base_url -_eof - fi - - fi - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 - # which is already installed - if [ -n "${PY_PKG_VER}" ] && [ "${PY_PKG_VER}" -eq 3 ]; then - __PACKAGES="${pkg_append}${PY_PKG_VER}-m2crypto ${pkg_append}${PY_PKG_VER}-pyyaml" - else - __PACKAGES="m2crypto PyYAML ${pkg_append}-futures" - fi - - __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-crypto ${pkg_append}${PY_PKG_VER}-jinja2 procps-ng" - __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-msgpack ${pkg_append}${PY_PKG_VER}-requests ${pkg_append}${PY_PKG_VER}-zmq" - - # shellcheck disable=SC2086 - __yum_install_noinput ${__PACKAGES} || return 1 - fi - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi -} - install_amazon_linux_ami_stable() { install_centos_stable || return 1 return 0 @@ -6648,16 +5575,6 @@ install_amazon_linux_ami_2_check_services() { return 0 } -install_amazon_linux_ami_2_onedir() { - install_centos_stable || return 1 - return 0 -} - -install_amazon_linux_ami_2_onedir_post() { - install_centos_stable_post || return 1 - return 0 -} - # # 
Ended Amazon Linux AMI Install Functions # @@ -6749,10 +5666,6 @@ install_arch_linux_git_deps() { return 0 } -install_arch_linux_onedir_deps() { - install_arch_linux_stable_deps || return 1 -} - install_arch_linux_stable() { # Pacman does not resolve dependencies on outdated versions # They always need to be updated @@ -6771,8 +5684,6 @@ install_arch_linux_stable() { install_arch_linux_git() { - _POST_NEON_PIP_INSTALL_ARGS="${_POST_NEON_PIP_INSTALL_ARGS} --use-pep517" - _PIP_DOWNLOAD_ARGS="${_PIP_DOWNLOAD_ARGS} --use-pep517" if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 return 0 @@ -6830,15 +5741,8 @@ install_arch_linux_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - # Account for new path for services files in later releases - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" - else - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" - fi - if [ -f /usr/bin/systemctl ]; then - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -6905,344 +5809,11 @@ install_arch_check_services() { return 0 } - -install_arch_linux_onedir() { - install_arch_linux_stable || return 1 - - return 0 -} - -install_arch_linux_onedir_post() { - install_arch_linux_post || return 1 - - return 0 -} # # Ended Arch Install Functions # ####################################################################################################################### 
-####################################################################################################################### -# -# Photon OS Install Functions -# - -__install_saltstack_photon_onedir_repository() { - if [ "$ITYPE" = "stable" ]; then - REPO_REV="$ONEDIR_REV" - else - REPO_REV="latest" - fi - - __PY_VERSION_REPO="yum" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - REPO_FILE="/etc/yum.repos.d/salt.repo" - - if [ ! -s "$REPO_FILE" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then - FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/photon/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}" - if [ "${ONEDIR_REV}" = "nightly" ] ; then - FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/photon/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" - fi - - __fetch_url "${REPO_FILE}" "${FETCH_URL}.repo" - - GPG_KEY="SALT-PROJECT-GPG-PUBKEY-2023.pub" - - __rpm_import_gpg "${FETCH_URL}/${GPG_KEY}" || return 1 - - tdnf makecache || return 1 - elif [ "$REPO_REV" != "latest" ]; then - echowarn "salt.repo already exists, ignoring salt version argument." - echowarn "Use -F (forced overwrite) to install $REPO_REV." 
- fi - - return 0 -} - -install_photon_deps() { - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - tdnf -y update || return 1 - fi - - __PACKAGES="${__PACKAGES:=}" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -lt 3 ]; then - echoerror "There are no Python 2 stable packages for Fedora, only Py3 packages" - return 1 - fi - - PY_PKG_VER=3 - - __PACKAGES="${__PACKAGES} libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip python${PY_PKG_VER}-m2crypto python${PY_PKG_VER}-pyyaml" - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd" - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - fi - - # shellcheck disable=SC2086 - __tdnf_install_noinput ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1 - - return 0 -} - -install_photon_stable_post() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) - sleep 1 - systemctl daemon-reload - done -} - -install_photon_git_deps() { - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - # Packages are named python3- - PY_PKG_VER=3 - else - PY_PKG_VER=2 - fi - - __PACKAGES="" - if ! __check_command_exists ps; then - __PACKAGES="${__PACKAGES} procps-ng" - fi - if ! 
__check_command_exists git; then - __PACKAGES="${__PACKAGES} git" - fi - - if [ -n "${__PACKAGES}" ]; then - # shellcheck disable=SC2086 - __tdnf_install_noinput ${__PACKAGES} || return 1 - __PACKAGES="" - fi - - __git_clone_and_checkout || return 1 - - if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - - if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then - __PACKAGES="${__PACKAGES} ca-certificates" - fi - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr" - fi - - install_photon_deps || return 1 - - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - if __check_command_exists python3; then - __python="python3" - fi - elif [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then - if __check_command_exists python2; then - __python="python2" - fi - else - if ! __check_command_exists python; then - echoerror "Unable to find a python binary?!" - return 1 - fi - # Let's hope it's the right one - __python="python" - fi - - grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS=' - ' read -r dep; do - echodebug "Running '${__python}' -m pip install '${dep}'" - "${__python}" -m pip install "${dep}" || return 1 - done - else - __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc glibc-devel linux-devel.x86_64" - # shellcheck disable=SC2086 - __tdnf_install_noinput ${__PACKAGES} || return 1 - fi - - if [ "${DISTRO_MAJOR_VERSION}" -gt 3 ]; then - # Need newer version of setuptools on Photon - _setuptools_dep="setuptools>=${_MINIMUM_SETUPTOOLS_VERSION}" - echodebug "Running '${_PY_EXE} -m pip --upgrade install ${_setuptools_dep}'" - ${_PY_EXE} -m pip install --upgrade "${_setuptools_dep}" - fi - - # Let's trigger config_salt() - if [ "$_TEMP_CONFIG_DIR" = "null" ]; then - _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" - CONFIG_SALT_FUNC="config_salt" - fi - - return 0 -} - 
-install_photon_git() { - if [ "${_PY_EXE}" != "" ]; then - _PYEXE=${_PY_EXE} - echoinfo "Using the following python version: ${_PY_EXE} to install salt" - else - _PYEXE='python2' - fi - - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then - ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 - else - ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 - fi - return 0 -} - -install_photon_git_post() { - for fname in api master minion syndic; do - # Skip if not meant to be installed - [ $fname = "api" ] && \ - ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - # Account for new path for services files in later releases - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" - else - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" - fi - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" - - # Salt executables are located under `/usr/local/bin/` on Fedora 36+ - #if [ "${DISTRO_VERSION}" -ge 36 ]; then - # sed -i -e 's:/usr/bin/:/usr/local/bin/:g' /lib/systemd/system/salt-*.service - #fi - - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) - sleep 1 - systemctl daemon-reload - done -} - -install_photon_restart_daemons() { - [ $_START_DAEMONS -eq $BS_FALSE ] && return - - for fname in api master minion syndic; do - # Skip salt-api since the service 
should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - systemctl stop salt-$fname > /dev/null 2>&1 - systemctl start salt-$fname.service && continue - echodebug "Failed to start salt-$fname using systemd" - if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then - systemctl status salt-$fname.service - journalctl -xe - fi - done -} - -install_photon_check_services() { - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - __check_services_systemd salt-$fname || return 1 - done - - return 0 -} - -install_photon_onedir_deps() { - - if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then - tdnf -y update || return 1 - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - echowarn "Detected -r or -R option while installing Salt packages for Python 3." - echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." - echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." - fi - - if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then - __install_saltstack_photon_onedir_repository || return 1 - fi - - # If -R was passed, we need to configure custom repo url with rsync-ed packages - # Which is still handled in __install_saltstack_rhel_repository. 
This call has - # its own check in case -r was passed without -R. - if [ "$_CUSTOM_REPO_URL" != "null" ]; then - __install_saltstack_photon_onedir_repository || return 1 - fi - - __PACKAGES="procps-ng" - - # shellcheck disable=SC2086 - __tdnf_install_noinput ${__PACKAGES} || return 1 - - if [ "${_EXTRA_PACKAGES}" != "" ]; then - echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" - # shellcheck disable=SC2086 - __tdnf_install_noinput ${_EXTRA_PACKAGES} || return 1 - fi - - return 0 - -} - - -install_photon_onedir() { - STABLE_REV=$ONEDIR_REV - - __PACKAGES="" - - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-cloud" - fi - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-master" - fi - if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} salt-minion" - fi - if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} salt-syndic" - fi - - # shellcheck disable=SC2086 - __tdnf_install_noinput ${__PACKAGES} || return 1 - - return 0 -} - -install_photon_onedir_post() { - STABLE_REV=$ONEDIR_REV - install_photon_stable_post || return 1 - - return 0 -} -# -# Ended Fedora Install Functions -# -####################################################################################################################### - ####################################################################################################################### # # FreeBSD Install Functions @@ -7270,15 +5841,15 @@ install_freebsd_git_deps() { if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py39-salt) + SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py38-salt) # shellcheck disable=SC2086 /usr/local/sbin/pkg install -y ${SALT_DEPENDENCIES} python || return 1 - /usr/local/sbin/pkg install -y py39-requests || return 1 - /usr/local/sbin/pkg install -y py39-tornado4 || return 1 + /usr/local/sbin/pkg install -y py38-requests || 
return 1 + /usr/local/sbin/pkg install -y py38-tornado4 || return 1 else - /usr/local/sbin/pkg install -y python py39-pip py39-setuptools libzmq4 libunwind || return 1 + /usr/local/sbin/pkg install -y python py38-pip py38-setuptools libzmq4 libunwind || return 1 fi echodebug "Adapting paths to FreeBSD" @@ -7324,7 +5895,7 @@ install_freebsd_stable() { # installing latest version of salt from FreeBSD CURRENT ports repo # # shellcheck disable=SC2086 - /usr/local/sbin/pkg install -y py39-salt || return 1 + /usr/local/sbin/pkg install -y py38-salt || return 1 return 0 } @@ -7416,15 +5987,6 @@ install_freebsd_restart_daemons() { service salt_$fname start done } - -install_freebsd_onedir() { -# -# call install_freebsd_stable -# - install_freebsd_stable || return 1 - - return 0 -} # # Ended FreeBSD Install Functions # @@ -7459,7 +6021,7 @@ install_openbsd_git_deps() { __git_clone_and_checkout || return 1 if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - pkg_add -I -v py3-pip py3-setuptools + pkg_add -I -v py-pip py-setuptools fi # @@ -7543,14 +6105,6 @@ install_openbsd_restart_daemons() { return 0 } -install_openbsd_onedir() { -# -# Call install_openbsd_stable -# - install_openbsd_stable || return 1 - - return 0 -} # # Ended OpenBSD Install Functions # @@ -7751,14 +6305,6 @@ install_smartos_restart_daemons() { return 0 } -install_smartos_onedir() { -# -# call install_smartos_stable -# - install_smartos_stable || return 1 - - return 0 -} # # Ended SmartOS Install Functions # @@ -7775,16 +6321,19 @@ __set_suse_pkg_repo() { # Set distro repo variable if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then DISTRO_REPO="openSUSE_Tumbleweed" - elif [ "${DISTRO_MAJOR_VERSION}" -eq 15 ] && [ "${DISTRO_MINOR_VERSION}" -ge 4 ]; then - DISTRO_REPO="${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ] || [ "${DISTRO_MAJOR_VERSION}" -eq 15 ]; then DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" else 
DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}_SP${SUSE_PATCHLEVEL}" fi - suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" - suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" + if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then + suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" + else + suse_pkg_url_base="${HTTP_VAL}://repo.saltproject.io/opensuse" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack:products.repo" + fi SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path" } @@ -7804,7 +6353,7 @@ __version_lte() { zypper --non-interactive install --auto-agree-with-licenses python || return 1 fi - if [ "$(${_PY_EXE} -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print(V1<=V2)' "$1" "$2")" = "True" ]; then + if [ "$(python -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print V1<=V2' "$1" "$2")" = "True" ]; then __ZYPPER_REQUIRES_REPLACE_FILES=${BS_TRUE} else __ZYPPER_REQUIRES_REPLACE_FILES=${BS_FALSE} @@ -7921,7 +6470,7 @@ install_opensuse_git_deps() { fi # Check for Tumbleweed elif [ "${DISTRO_MAJOR_VERSION}" -ge 20210101 ]; then - __PACKAGES="python3-pip gcc-c++ python3-pyzmq-devel" + __PACKAGES="python3-pip" else __PACKAGES="python-pip python-setuptools gcc" fi @@ -7938,10 +6487,6 @@ install_opensuse_git_deps() { return 0 } -install_opensuse_onedir_deps() { - install_opensuse_stable_deps || return 1 -} - install_opensuse_stable() { __PACKAGES="" @@ -7974,10 +6519,6 @@ install_opensuse_git() { return 0 } -install_opensuse_onedir() { - install_opensuse_stable || return 1 -} - install_opensuse_stable_post() { for fname in api master minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -8022,17 
+6563,10 @@ install_opensuse_git_post() { use_usr_lib=$BS_TRUE fi - # Account for new path for services files in later releases - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" - else - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/" - fi - if [ "${use_usr_lib}" -eq $BS_TRUE ]; then - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" else - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" fi continue @@ -8047,10 +6581,6 @@ install_opensuse_git_post() { return 0 } -install_opensuse_onedir_post() { - install_opensuse_stable_post || return 1 -} - install_opensuse_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -8210,11 +6740,6 @@ install_opensuse_15_git() { return 0 } -install_opensuse_15_onedir_deps() { - __opensuse_prep_install || return 1 - return 0 -} - # # End of openSUSE Leap 15 # @@ -8244,13 +6769,6 @@ install_suse_15_git_deps() { return 0 } -install_suse_15_onedir_deps() { - __opensuse_prep_install || return 1 - install_opensuse_15_onedir_deps || return 1 - - return 0 -} - install_suse_15_stable() { install_opensuse_stable || return 1 return 0 @@ -8261,11 +6779,6 @@ install_suse_15_git() { return 0 } -install_suse_15_onedir() { - install_opensuse_stable || return 1 - return 0 -} - install_suse_15_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -8276,11 +6789,6 @@ install_suse_15_git_post() { return 0 } -install_suse_15_onedir_post() { - install_opensuse_stable_post || return 1 - return 0 -} - install_suse_15_restart_daemons() { install_opensuse_restart_daemons || return 1 return 0 @@ -8363,11 +6871,6 @@ 
install_suse_12_git_deps() { return 0 } -install_suse_12_onedir_deps() { - install_suse_12_stable_deps || return 1 - return 0 -} - install_suse_12_stable() { install_opensuse_stable || return 1 return 0 @@ -8378,11 +6881,6 @@ install_suse_12_git() { return 0 } -install_suse_12_onedir() { - install_opensuse_stable || return 1 - return 0 -} - install_suse_12_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -8393,11 +6891,6 @@ install_suse_12_git_post() { return 0 } -install_suse_12_onedir_post() { - install_opensuse_stable_post || return 1 - return 0 -} - install_suse_12_restart_daemons() { install_opensuse_restart_daemons || return 1 return 0 @@ -8474,11 +6967,6 @@ install_suse_11_git_deps() { return 0 } -install_suse_11_onedir_deps() { - install_suse_11_stable_deps || return 1 - return 0 -} - install_suse_11_stable() { install_opensuse_stable || return 1 return 0 @@ -8489,11 +6977,6 @@ install_suse_11_git() { return 0 } -install_suse_11_onedir() { - install_opensuse_stable || return 1 - return 0 -} - install_suse_11_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -8504,11 +6987,6 @@ install_suse_11_git_post() { return 0 } -install_suse_11_onedir_post() { - install_opensuse_stable_post || return 1 - return 0 -} - install_suse_11_restart_daemons() { install_opensuse_restart_daemons || return 1 return 0 @@ -8608,6 +7086,11 @@ __gentoo_pre_dep() { mkdir /etc/portage fi + # Enable Python 3.6 target for pre Neon Salt release + if echo "${STABLE_REV}" | grep -q "2019" || [ "${ITYPE}" = "git" ] && [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + EXTRA_PYTHON_TARGET=python3_6 + fi + # Enable Python 3.7 target for Salt Neon using GIT if [ "${ITYPE}" = "git" ] && [ "${GIT_REV}" = "v3000" ]; then EXTRA_PYTHON_TARGET=python3_7 @@ -8703,9 +7186,6 @@ install_gentoo_git_deps() { __emerge ${GENTOO_GIT_PACKAGES} || return 1 fi - echoinfo "Running emerge -v1 setuptools" - __emerge -v1 setuptools || return 1 - __git_clone_and_checkout || 
return 1 __gentoo_post_dep || return 1 } @@ -8753,11 +7233,6 @@ install_gentoo_git() { return 0 } -install_gentoo_onedir() { - STABLE_REV=${ONEDIR_REV} - install_gentoo_stable || return 1 -} - install_gentoo_post() { for fname in api master minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -8793,15 +7268,8 @@ install_gentoo_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - # Account for new path for services files in later releases - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" - else - _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" - fi - if __check_command_exists systemctl ; then - __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -8847,10 +7315,6 @@ _eof return 0 } -install_gentoo_onedir_post() { - install_gentoo_post || return 1 -} - install_gentoo_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -9002,46 +7466,7 @@ __macosx_get_packagesite() { fi PKG="salt-${STABLE_REV}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" - SALTPKGCONFURL="https://${_REPO_URL}/osx/${PKG}" -} - -__parse_repo_json_python() { - - # Using latest, grab the right - # version from the repo.json - _JSON_VERSION=$(python - <<-EOF -import json, urllib.request -url = "https://repo.saltproject.io/salt/py3/macos/repo.json" -response = urllib.request.urlopen(url) -data = json.loads(response.read()) -version = data["${_ONEDIR_REV}"][list(data["${_ONEDIR_REV}"])[0]]['version'] -print(version) -EOF -) -echo "${_JSON_VERSION}" -} - -__macosx_get_packagesite_onedir() 
{ - DARWIN_ARCH="x86_64" - - __PY_VERSION_REPO="py2" - if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PY_VERSION_REPO="py3" - fi - - if [ "$(echo "$_ONEDIR_REV" | grep -E '^(latest)$')" != "" ]; then - _PKG_VERSION=$(__parse_repo_json_python) - elif [ "$(echo "$_ONEDIR_REV" | grep -E '^([3-9][0-9]{3}(\.[0-9]*))')" != "" ]; then - _PKG_VERSION=$_ONEDIR_REV - else - _PKG_VERSION=$(__parse_repo_json_python) - fi - if [ "$(echo "$_ONEDIR_REV" | grep -E '^(3005)')" != "" ]; then - PKG="salt-${_PKG_VERSION}-macos-${DARWIN_ARCH}.pkg" - else - PKG="salt-${_PKG_VERSION}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" - fi - SALTPKGCONFURL="https://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/macos/${ONEDIR_REV}/${PKG}" + SALTPKGCONFURL="https://repo.saltproject.io/osx/${PKG}" } # Using a separate conf step to head for idempotent install... @@ -9050,21 +7475,11 @@ __configure_macosx_pkg_details() { return 0 } -__configure_macosx_pkg_details_onedir() { - __macosx_get_packagesite_onedir || return 1 - return 0 -} - install_macosx_stable_deps() { __configure_macosx_pkg_details || return 1 return 0 } -install_macosx_onedir_deps() { - __configure_macosx_pkg_details_onedir || return 1 - return 0 -} - install_macosx_git_deps() { install_macosx_stable_deps || return 1 @@ -9111,16 +7526,6 @@ install_macosx_stable() { return 0 } -install_macosx_onedir() { - install_macosx_onedir_deps || return 1 - - __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1 - - /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1 - - return 0 -} - install_macosx_git() { if [ -n "$_PY_EXE" ]; then @@ -9158,11 +7563,6 @@ install_macosx_stable_post() { return 0 } -install_macosx_onedir_post() { - install_macosx_stable_post || return 1 - return 0 -} - install_macosx_git_post() { install_macosx_stable_post || return 1 return 0 @@ -9171,15 +7571,8 @@ install_macosx_git_post() { install_macosx_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return - if [ "$_INSTALL_MINION" -eq 
$BS_TRUE ]; then - /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 - /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 - fi - - if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then - /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist || return 1 - /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.master.plist || return 1 - fi + /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 return 0 } @@ -9381,43 +7774,6 @@ preseed_master() { # ####################################################################################################################### -####################################################################################################################### -# -# This function checks if all of the installed daemons are running or not. 
-# -daemons_running_onedir() { - [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 - - FAILED_DAEMONS=0 - for fname in api master minion syndic; do - # Skip salt-api since the service should be opt-in and not necessarily started on boot - [ $fname = "api" ] && continue - - # Skip if not meant to be installed - [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue - [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue - [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - - if [ -f "/opt/saltstack/salt/run/run" ]; then - salt_path="/opt/saltstack/salt/run/run ${fname}" - else - salt_path="salt-${fname}" - fi - process_running=$(pgrep -f "${salt_path}") - if [ "${process_running}" = "" ]; then - echoerror "${salt_path} was not found running" - FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) - fi - done - - return $FAILED_DAEMONS -} - -# -# Ended daemons running check function -# -####################################################################################################################### - ####################################################################################################################### # # This function checks if all of the installed daemons are running or not. 
@@ -9518,7 +7874,6 @@ echodebug "PRESEED_MASTER_FUNC=${PRESEED_MASTER_FUNC}" INSTALL_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}" -echodebug "INSTALL_FUNC_NAMES=${INSTALL_FUNC_NAMES}" INSTALL_FUNC="null" for FUNC_NAME in $(__strip_duplicates "$INSTALL_FUNC_NAMES"); do @@ -9570,7 +7925,6 @@ DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}_${ITYPE}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}" -DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${ITYPE}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running" DAEMONS_RUNNING_FUNC="null" @@ -9760,11 +8114,6 @@ if [ "$DAEMONS_RUNNING_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; fi fi -if [ "$_AUTO_ACCEPT_MINION_KEYS" -eq "$BS_TRUE" ]; then - echoinfo "Accepting the Salt Minion Keys" - salt-key -yA -fi - # Done! if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then echoinfo "Salt installed!" @@ -9772,13 +8121,6 @@ else echoinfo "Salt configured!" fi -if [ "$_QUICK_START" -eq "$BS_TRUE" ]; then - echoinfo "Congratulations!" 
- echoinfo "A couple of commands to try:" - echoinfo " salt \* test.ping" - echoinfo " salt \* test.version" -fi - exit 0 # vim: set sts=4 ts=4 et diff --git a/setup/so-functions b/setup/so-functions index b64daaa92..1c9b0f43d 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1258,7 +1258,7 @@ generate_ssl() { # if the install type is a manager then we need to wait for the minion to be ready before trying # to run the ssl state since we need the minion to sign the certs if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then - wait_for_salt_minion "$MINION_ID" "5" "$setup_log" || fail_setup + wait_for_salt_minion fi info "Applying SSL state" logCmd "salt-call state.apply ssl -l info" @@ -1972,7 +1972,6 @@ securityonion_repo() { } repo_sync_local() { - SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //') info "Repo Sync" if [[ $is_supported ]]; then # Sync the repo from the the SO repo locally. 
@@ -2022,7 +2021,7 @@ repo_sync_local() { curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo - curl -fsSL "https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/$SALTVERSION.repo" | tee /etc/yum.repos.d/salt.repo + curl -fsSL https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/3006.1.repo | tee /etc/yum.repos.d/salt.repo dnf repolist curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install else From a887551dad8a9fed9a2b3f4d3b5e506d926a5e68 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 25 Oct 2023 15:22:47 -0400 Subject: [PATCH 271/417] Annotation changes for warm node --- salt/elasticsearch/soc_elasticsearch.yaml | 262 ++++++++++++++++------ 1 file changed, 197 insertions(+), 65 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index e4de29e00..f04ba08e0 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -33,7 +33,6 @@ elasticsearch: flood_stage: description: The max percentage of used disk space that will cause the node to take protective actions, such as blocking incoming events. helpLink: elasticsearch.html - script: max_compilations_rate: description: Max rate of script compilations permitted in the Elasticsearch cluster. Larger values will consume more resources. @@ -48,6 +47,11 @@ elasticsearch: helpLink: elasticsearch.html index_settings: global_overrides: + index_sorting: + description: Sorts the index by event time, at the cost of additional processing resource consumption. 
+ global: True + advanced: True + helpLink: elasticsearch.html index_template: template: settings: @@ -57,32 +61,6 @@ elasticsearch: forcedType: int global: True helpLink: elasticsearch.html - so-logs: &indexSettings - index_sorting: - description: Sorts the index by event time, at the cost of additional processing resource consumption. - global: True - helpLink: elasticsearch.html - index_template: - index_patterns: - description: Patterns for matching multiple indices or tables. - forceType: "[]string" - multiline: True - global: True - helpLink: elasticsearch.html - template: - settings: - index: - number_of_replicas: - description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs. - forcedType: int - global: True - helpLink: elasticsearch.html - mapping: - total_fields: - limit: - description: Max number of fields that can exist on a single index. Larger values will consume more resources. - global: True - helpLink: elasticsearch.html refresh_interval: description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation. global: True @@ -100,44 +78,7 @@ elasticsearch: description: The order to sort by. Must set index_sorting to True. global: True helpLink: elasticsearch.html - mappings: - _meta: - package: - name: - description: Meta settings for the mapping. - global: True - helpLink: elasticsearch.html - managed_by: - description: Meta settings for the mapping. - global: True - helpLink: elasticsearch.html - managed: - description: Meta settings for the mapping. - forcedType: bool - global: True - helpLink: elasticsearch.html - composed_of: - description: The index template is composed of these component templates. - forcedType: "[]string" - global: True - helpLink: elasticsearch.html - priority: - description: The priority of the index template. 
- forcedType: int - global: True - helpLink: elasticsearch.html - data_stream: - hidden: - description: Hide the data stream. - forcedType: bool - global: True - helpLink: elasticsearch.html - allow_custom_routing: - description: Allow custom routing for the data stream. - forcedType: bool - global: True - helpLink: elasticsearch.html - policy: + policy: phases: hot: min_age: @@ -160,6 +101,27 @@ elasticsearch: description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. global: True helpLink: elasticsearch.html + warm: + min_age: + description: Minimum age of index. This determines when the index should be moved to the warm tier. + global: True + helpLink: elasticsearch.html + actions: + set_priority: + priority: + description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. + forcedType: int + global: True + helpLink: elasticsearch.html + rollover: + max_age: + description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index. + global: True + helpLink: elasticsearch.html + max_primary_shard_size: + description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. + global: True + helpLink: elasticsearch.html cold: min_age: description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. @@ -177,20 +139,190 @@ elasticsearch: description: Minimum age of index. This determines when the index should be deleted. global: True helpLink: elasticsearch.html + so-logs: &indexSettings + index_sorting: + description: Sorts the index by event time, at the cost of additional processing resource consumption. 
+ global: True + advanced: True + helpLink: elasticsearch.html + index_template: + index_patterns: + description: Patterns for matching multiple indices or tables. + forceType: "[]string" + multiline: True + global: True + advanced: True + helpLink: elasticsearch.html + template: + settings: + index: + number_of_replicas: + description: Number of replicas required for this index. Multiple replicas protects against data loss, but also increases storage costs. + forcedType: int + global: True + advanced: True + helpLink: elasticsearch.html + mapping: + total_fields: + limit: + description: Max number of fields that can exist on a single index. Larger values will consume more resources. + global: True + advanced: True + helpLink: elasticsearch.html + refresh_interval: + description: Seconds between index refreshes. Shorter intervals can cause query performance to suffer since this is a synchronous and resource-intensive operation. + global: True + advanced: True + helpLink: elasticsearch.html + number_of_shards: + description: Number of shards required for this index. Using multiple shards increases fault tolerance, but also increases storage and network costs. + global: True + advanced: True + helpLink: elasticsearch.html + sort: + field: + description: The field to sort by. Must set index_sorting to True. + global: True + advanced: True + helpLink: elasticsearch.html + order: + description: The order to sort by. Must set index_sorting to True. + global: True + advanced: True + helpLink: elasticsearch.html + mappings: + _meta: + package: + name: + description: Meta settings for the mapping. + global: True + advanced: True + helpLink: elasticsearch.html + managed_by: + description: Meta settings for the mapping. + global: True + advanced: True + helpLink: elasticsearch.html + managed: + description: Meta settings for the mapping. 
+ forcedType: bool + global: True + advanced: True + helpLink: elasticsearch.html + composed_of: + description: The index template is composed of these component templates. + forcedType: "[]string" + global: True + advanced: True + helpLink: elasticsearch.html + priority: + description: The priority of the index template. + forcedType: int + global: True + advanced: True + helpLink: elasticsearch.html + data_stream: + hidden: + description: Hide the data stream. + forcedType: bool + global: True + advanced: True + helpLink: elasticsearch.html + allow_custom_routing: + description: Allow custom routing for the data stream. + forcedType: bool + global: True + advanced: True + helpLink: elasticsearch.html + policy: + phases: + hot: + min_age: + description: Minimum age of index. This determines when the index should be moved to the hot tier. + global: True + advanced: True + helpLink: elasticsearch.html + actions: + set_priority: + priority: + description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. + forcedType: int + global: True + advanced: True + helpLink: elasticsearch.html + rollover: + max_age: + description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index. + global: True + advanced: True + helpLink: elasticsearch.html + max_primary_shard_size: + description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. + global: True + advanced: True + helpLink: elasticsearch.html + warm: + min_age: + description: Minimum age of index. This determines when the index should be moved to the hot tier. + global: True + advanced: True + helpLink: elasticsearch.html + actions: + set_priority: + priority: + description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. 
+ forcedType: int + global: True + advanced: True + helpLink: elasticsearch.html + rollover: + max_age: + description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index. + global: True + advanced: True + helpLink: elasticsearch.html + max_primary_shard_size: + description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. + global: True + advanced: True + helpLink: elasticsearch.html + cold: + min_age: + description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + global: True + advanced: True + helpLink: elasticsearch.html + actions: + set_priority: + priority: + description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. + forcedType: int + global: True + advanced: True + helpLink: elasticsearch.html + delete: + min_age: + description: Minimum age of index. This determines when the index should be deleted. + global: True + advanced: True + helpLink: elasticsearch.html _meta: package: name: description: Meta settings for the mapping. global: True + advanced: True helpLink: elasticsearch.html managed_by: description: Meta settings for the mapping. global: True + advanced: True helpLink: elasticsearch.html managed: description: Meta settings for the mapping. 
forcedType: bool global: True + advanced: True helpLink: elasticsearch.html so-logs-system_x_auth: *indexSettings so-logs-system_x_syslog: *indexSettings From 6fb0c5dbfe5800c59a078a7aed80b0be1a8ed1c8 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 25 Oct 2023 15:37:36 -0400 Subject: [PATCH 272/417] Annotation changes for warm node --- salt/elasticsearch/soc_elasticsearch.yaml | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index f04ba08e0..764de3c44 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -477,3 +477,18 @@ elasticsearch: so-strelka: *indexSettings so-syslog: *indexSettings so-zeek: *indexSettings + so_roles: + so-maanger: &soroleSettings + node: + roles: + description: List of Elasticsearch roles that the node should have. Blank assumes all roles + forcedType: "[]string" + global: False + advanced: True + helpLink: elasticsearch.html + so-managersearch: *soroleSettings + so-standalone: *soroleSettings + so-searchnode: *soroleSettings + so-heavynode: *soroleSettings + so-eval: *soroleSettings + so-import: *soroleSettings \ No newline at end of file From 1ae8896a05b6b07c4bdaf664f72ecc56544c4c4f Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Wed, 25 Oct 2023 15:47:40 -0400 Subject: [PATCH 273/417] Update config.map.jinja --- salt/elasticsearch/config.map.jinja | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/salt/elasticsearch/config.map.jinja b/salt/elasticsearch/config.map.jinja index 37447cabb..c98d96cc0 100644 --- a/salt/elasticsearch/config.map.jinja +++ b/salt/elasticsearch/config.map.jinja @@ -20,20 +20,12 @@ {% for NODE in ES_LOGSTASH_NODES %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.discovery.seed_hosts.append(NODE.keys()|first) %} {% endfor %} - {% if grains.id.split('_') | last == 'manager' %} - {% do 
ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master','data','remote_cluster_client','transform']}) %} - {% else %} - {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data_hot', 'remote_cluster_client']}) %} - {% endif %} {% endif %} {% elif grains.id.split('_') | last == 'searchnode' %} - {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['data_hot', 'ingest']}) %} {% if HIGHLANDER %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.roles.extend(['ml', 'master', 'transform']) %} {% endif %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.update({'discovery': {'seed_hosts': [GLOBALS.manager]}}) %} -{% elif grains.id.split('_') | last == 'heavynode' %} - {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.node.update({'roles': ['master', 'data', 'remote_cluster_client', 'ingest']}) %} {% endif %} {% if HIGHLANDER %} {% do ELASTICSEARCHDEFAULTS.elasticsearch.config.xpack.ml.update({'enabled': true}) %} @@ -53,3 +45,5 @@ {% endif %} {% endfor %} {% endif %} + +{% do ELASTICSEARCHMERGED.elasticsearch.config.node.update({'roles': ELASTICSEARCHMERGED.so_roles[GLOBALS.role].node.roles}) %} From af4b34801f958b9110e4de3837b01e3d8d341018 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Wed, 25 Oct 2023 15:48:27 -0400 Subject: [PATCH 274/417] Update defaults.yaml --- salt/elasticsearch/defaults.yaml | 38 ++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 1296ef549..5449df506 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -4920,3 +4920,41 @@ elasticsearch: data_stream: hidden: false allow_custom_routing: false + so_roles: + so-manager: + node: + roles: + - master + - data + - remote_cluster_client + - transform + so-managersearch: + node: + roles: + - master + - data_hot + - remote_cluster_client + so-standalone: + node: + roles: + - master + - data_hot + 
- remote_cluster_client + so-searchnode: + node: + roles: + - data_hot + - ingest + so-heavynode: + node: + roles: + - master + - data + - remote_cluster_client + - ingest + so-eval: + node: + roles: [] + so-import: + node: + roles: [] From dc53b49f15ed56cfa504193b1f22ee8adc1993c1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 25 Oct 2023 15:53:39 -0400 Subject: [PATCH 275/417] Update soup --- salt/manager/tools/sbin/soup | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index fc07765b8..0666e25ae 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -403,6 +403,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.4 ]] && up_to_2.4.5 [[ "$INSTALLEDVERSION" == 2.4.5 ]] && up_to_2.4.10 [[ "$INSTALLEDVERSION" == 2.4.10 ]] && up_to_2.4.20 + [[ "$INSTALLEDVERSION" == 2.4.20 ]] && up_to_2.4.30 true } @@ -414,7 +415,8 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.3 ]] && post_to_2.4.4 [[ "$POSTVERSION" == 2.4.4 ]] && post_to_2.4.5 [[ "$POSTVERSION" == 2.4.5 ]] && post_to_2.4.10 - [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20 + [[ "$POSTVERSION" == 2.4.10 ]] && post_to_2.4.20 + [[ "$POSTVERSION" == 2.4.20 ]] && post_to_2.4.30 true } @@ -446,6 +448,11 @@ post_to_2.4.20() { POSTVERSION=2.4.20 } +post_to_2.4.30() { + echo "Nothing to apply" + POSTVERSION=2.4.30 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." 
@@ -523,6 +530,12 @@ up_to_2.4.20() { INSTALLEDVERSION=2.4.20 } +up_to_2.4.30() { + echo "Nothing to do for 2.4.30" + + INSTALLEDVERSION=2.4.30 +} + determine_elastic_agent_upgrade() { if [[ $is_airgap -eq 0 ]]; then update_elastic_agent_airgap From 19fdc9319b900e315427af8364fca73d15289ac1 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 25 Oct 2023 15:58:26 -0400 Subject: [PATCH 276/417] fix role update --- salt/elasticsearch/config.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/config.map.jinja b/salt/elasticsearch/config.map.jinja index c98d96cc0..cd0cd8974 100644 --- a/salt/elasticsearch/config.map.jinja +++ b/salt/elasticsearch/config.map.jinja @@ -46,4 +46,4 @@ {% endfor %} {% endif %} -{% do ELASTICSEARCHMERGED.elasticsearch.config.node.update({'roles': ELASTICSEARCHMERGED.so_roles[GLOBALS.role].node.roles}) %} +{% do ELASTICSEARCHMERGED.config.node.update({'roles': ELASTICSEARCHMERGED.so_roles[GLOBALS.role].node.roles}) %} From d1170cb69f1f197b638151ffef9561d50b3122f9 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 25 Oct 2023 16:05:20 -0400 Subject: [PATCH 277/417] Update soc_elasticsearch.yaml --- salt/elasticsearch/soc_elasticsearch.yaml | 23 +---------------------- 1 file changed, 1 insertion(+), 22 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 764de3c44..fde8d234f 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -101,27 +101,6 @@ elasticsearch: description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. global: True helpLink: elasticsearch.html - warm: - min_age: - description: Minimum age of index. This determines when the index should be moved to the warm tier. - global: True - helpLink: elasticsearch.html - actions: - set_priority: - priority: - description: Priority of index. 
This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. - forcedType: int - global: True - helpLink: elasticsearch.html - rollover: - max_age: - description: Maximum age of index. Once an index reaches this limit, it will be rolled over into a new index. - global: True - helpLink: elasticsearch.html - max_primary_shard_size: - description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. - global: True - helpLink: elasticsearch.html cold: min_age: description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. @@ -491,4 +470,4 @@ elasticsearch: so-searchnode: *soroleSettings so-heavynode: *soroleSettings so-eval: *soroleSettings - so-import: *soroleSettings \ No newline at end of file + so-import: *soroleSettings From 5f168a33edaa8a4c0801340d8f8eaf23d68279ff Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Wed, 25 Oct 2023 16:16:01 -0400 Subject: [PATCH 278/417] Update defaults.yaml --- salt/elasticsearch/defaults.yaml | 67 ++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 30 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 5449df506..807ca9ea9 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -4922,39 +4922,46 @@ elasticsearch: allow_custom_routing: false so_roles: so-manager: - node: - roles: - - master - - data - - remote_cluster_client - - transform + config: + node: + roles: + - master + - data + - remote_cluster_client + - transform so-managersearch: - node: - roles: - - master - - data_hot - - remote_cluster_client + config: + node: + roles: + - master + - data_hot + - remote_cluster_client so-standalone: - node: - roles: - - master - - data_hot - - remote_cluster_client + config: + 
node: + roles: + - master + - data_hot + - remote_cluster_client so-searchnode: - node: - roles: - - data_hot - - ingest + config: + node: + roles: + - data_hot + - ingest so-heavynode: - node: - roles: - - master - - data - - remote_cluster_client - - ingest + config: + node: + roles: + - master + - data + - remote_cluster_client + - ingest so-eval: - node: - roles: [] + config: + node: + roles: [] so-import: - node: - roles: [] + config: + node: + roles: [] From 807b40019fc098ea8c0c8d70355e1086dd3dbebc Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Wed, 25 Oct 2023 16:16:48 -0400 Subject: [PATCH 279/417] Update soc_elasticsearch.yaml --- salt/elasticsearch/soc_elasticsearch.yaml | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index fde8d234f..d456dcbfc 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -458,13 +458,14 @@ elasticsearch: so-zeek: *indexSettings so_roles: so-maanger: &soroleSettings - node: - roles: - description: List of Elasticsearch roles that the node should have. Blank assumes all roles - forcedType: "[]string" - global: False - advanced: True - helpLink: elasticsearch.html + config: + node: + roles: + description: List of Elasticsearch roles that the node should have. 
Blank assumes all roles + forcedType: "[]string" + global: False + advanced: True + helpLink: elasticsearch.html so-managersearch: *soroleSettings so-standalone: *soroleSettings so-searchnode: *soroleSettings From 39abe19cfd9f6ad0553ade33614ae2cee2829a8d Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Wed, 25 Oct 2023 16:17:06 -0400 Subject: [PATCH 280/417] Update config.map.jinja --- salt/elasticsearch/config.map.jinja | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/config.map.jinja b/salt/elasticsearch/config.map.jinja index cd0cd8974..4e57199af 100644 --- a/salt/elasticsearch/config.map.jinja +++ b/salt/elasticsearch/config.map.jinja @@ -46,4 +46,4 @@ {% endfor %} {% endif %} -{% do ELASTICSEARCHMERGED.config.node.update({'roles': ELASTICSEARCHMERGED.so_roles[GLOBALS.role].node.roles}) %} +{% do ELASTICSEARCHMERGED.config.node.update({'roles': ELASTICSEARCHMERGED.so_roles[GLOBALS.role].config.node.roles}) %} From 88fb7d06e673854a2c8bf22a6c2942493cec77a1 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 25 Oct 2023 16:20:28 -0400 Subject: [PATCH 281/417] Annotation changes for warm node --- salt/elasticsearch/soc_elasticsearch.yaml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index d456dcbfc..368f15196 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -47,11 +47,6 @@ elasticsearch: helpLink: elasticsearch.html index_settings: global_overrides: - index_sorting: - description: Sorts the index by event time, at the cost of additional processing resource consumption. 
- global: True - advanced: True - helpLink: elasticsearch.html index_template: template: settings: From 6d6292714f5053b76b5aff8e6bbea37341772d1e Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 25 Oct 2023 16:21:47 -0400 Subject: [PATCH 282/417] Annotation changes for warm node --- salt/elasticsearch/soc_elasticsearch.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 368f15196..46306203a 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -73,7 +73,6 @@ elasticsearch: description: The order to sort by. Must set index_sorting to True. global: True helpLink: elasticsearch.html - policy: phases: hot: min_age: From 01810a782cde1f49fcd86c153c81e99b5df14ce3 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 25 Oct 2023 16:46:30 -0400 Subject: [PATCH 283/417] Annotation changes for warm node --- salt/elasticsearch/soc_elasticsearch.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 46306203a..e3d257f11 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -451,7 +451,7 @@ elasticsearch: so-syslog: *indexSettings so-zeek: *indexSettings so_roles: - so-maanger: &soroleSettings + so-manger: &soroleSettings config: node: roles: From 891ea997e75fdbecc4a043ba2f3a41be51439cdc Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 26 Oct 2023 12:25:37 +0000 Subject: [PATCH 284/417] Add lifecycle policies and warm settings --- salt/elasticsearch/defaults.yaml | 13704 +++++++++++++++++++---------- 1 file changed, 8973 insertions(+), 4731 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 807ca9ea9..9aef09876 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1,55 +1,16 @@ elasticsearch: - 
enabled: False - retention: - retention_pct: 50 config: - node: {} + action: + destructive_requires_name: true cluster: routing: allocation: disk: threshold_enabled: true watermark: - low: 80% - high: 85% flood_stage: 90% - network: - host: 0.0.0.0 - path: - logs: /var/log/elasticsearch - action: - destructive_requires_name: true - transport: - bind_host: 0.0.0.0 - publish_port: 9300 - xpack: - ml: - enabled: false - security: - enabled: true - authc: - anonymous: - authz_exception: true - roles: [] - username: _anonymous - transport: - ssl: - enabled: true - verification_mode: none - key: /usr/share/elasticsearch/config/elasticsearch.key - certificate: /usr/share/elasticsearch/config/elasticsearch.crt - certificate_authorities: - - /usr/share/elasticsearch/config/ca.crt - http: - ssl: - enabled: true - client_authentication: none - key: /usr/share/elasticsearch/config/elasticsearch.key - certificate: /usr/share/elasticsearch/config/elasticsearch.crt - certificate_authorities: - - /usr/share/elasticsearch/config/ca.crt - script: - max_compilations_rate: 20000/1m + high: 85% + low: 80% indices: id_field_data: enabled: false @@ -57,3833 +18,8553 @@ elasticsearch: org: elasticsearch: deprecation: ERROR + network: + host: 0.0.0.0 + node: {} + path: + logs: /var/log/elasticsearch + script: + max_compilations_rate: 20000/1m + transport: + bind_host: 0.0.0.0 + publish_port: 9300 + xpack: + ml: + enabled: false + security: + authc: + anonymous: + authz_exception: true + roles: [] + username: _anonymous + enabled: true + http: + ssl: + certificate: /usr/share/elasticsearch/config/elasticsearch.crt + certificate_authorities: + - /usr/share/elasticsearch/config/ca.crt + client_authentication: none + enabled: true + key: /usr/share/elasticsearch/config/elasticsearch.key + transport: + ssl: + certificate: /usr/share/elasticsearch/config/elasticsearch.crt + certificate_authorities: + - /usr/share/elasticsearch/config/ca.crt + enabled: true + key: 
/usr/share/elasticsearch/config/elasticsearch.key + verification_mode: none + enabled: false index_settings: global_overrides: index_template: template: settings: index: + lifecycle: + name: global_overrides-logs number_of_replicas: default_placeholder - so-logs: - index_sorting: False - index_template: - index_patterns: - - "logs-*-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5001 - sort: - field: "@timestamp" - order: desc - mappings: - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - composed_of: - - "so-data-streams-mappings" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - - "so-logs-mappings" - - "so-logs-settings" - priority: 225 - data_stream: - hidden: false - allow_custom_routing: false policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-system_x_auth: - index_sorting: False - index_template: - index_patterns: - - "logs-system.auth*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "event-mappings" - - "logs-system.auth@package" - - "logs-system.auth@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-system_x_syslog: - index_sorting: False - index_template: - index_patterns: - - "logs-system.syslog*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "event-mappings" - - "logs-system.syslog@package" - - "logs-system.syslog@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-system_x_system: - 
index_sorting: False - index_template: - index_patterns: - - "logs-system.system*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "event-mappings" - - "logs-system.system@package" - - "logs-system.system@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-system_x_application: - index_sorting: False - index_template: - index_patterns: - - "logs-system.application*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "event-mappings" - - "logs-system.application@package" - - "logs-system.application@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-system_x_security: - index_sorting: False - index_template: - index_patterns: - - "logs-system.security*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "event-mappings" - - "logs-system.security@package" - - "logs-system.security@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-windows_x_forwarded: - index_sorting: False - index_template: - index_patterns: - - "logs-windows.forwarded*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-windows.forwarded@package" - - "logs-windows.forwarded@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-windows_x_powershell: - index_sorting: False - index_template: - index_patterns: - - "logs-windows.powershell-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-windows.powershell@package" - - "logs-windows.powershell@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - 
data_stream: - hidden: false - allow_custom_routing: false - so-logs-windows_x_powershell_operational: - index_sorting: False - index_template: - index_patterns: - - "logs-windows.powershell_operational-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-windows.powershell_operational@package" - - "logs-windows.powershell_operational@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-windows_x_sysmon_operational: - index_sorting: False - index_template: - index_patterns: - - "logs-windows.sysmon_operational-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-windows.sysmon_operational@package" - - "logs-windows.sysmon_operational@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-apache_x_access: - index_sorting: False - index_template: - index_patterns: - - "logs-apache.access-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-apache.access@package" - - "logs-apache.access@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-apache_x_error: - index_sorting: False - index_template: - index_patterns: - - "logs-apache.error-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-apache.error@package" - - "logs-apache.error@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-auditd_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-auditd.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-auditd.log@package" - - "logs-auditd.log@custom" - - 
"so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_cloudtrail: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.cloudtrail-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.cloudtrail@package" - - "logs-aws.cloudtrail@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_cloudwatch_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.cloudwatch_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.cloudwatch_logs@package" - - "logs-aws.cloudwatch_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_ec2_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.ec2_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.ec2_logs@package" - - "logs-aws.ec2_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_elb_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.elb_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.elb_logs@package" - - "logs-aws.elb_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_firewall_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.firewall_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.firewall_logs@package" - - "logs-aws.firewall_logs@custom" - - 
"so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_route53_public_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.route53_public_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.route53_public_logs@package" - - "logs-aws.route53_public_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_route53_resolver_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.route53_resolver_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.route53_resolver_logs@package" - - "logs-aws.route53_resolver_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_s3access: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.s3access-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.s3access@package" - - "logs-aws.s3access@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_vpcflow: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.vpcflow-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.vpcflow@package" - - "logs-aws.vpcflow@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-aws_x_waf: - index_sorting: False - index_template: - index_patterns: - - "logs-aws.waf-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-aws.waf@package" - - 
"logs-aws.waf@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_activitylogs: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.activitylogs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.activitylogs@package" - - "logs-azure.activitylogs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_application_gateway: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.application_gateway-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.application_gateway@package" - - "logs-azure.application_gateway@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_auditlogs: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.auditlogs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.auditlogs@package" - - "logs-azure.auditlogs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_eventhub: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.eventhub-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.eventhub@package" - - "logs-azure.eventhub@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_firewall_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.firewall_logs-*" - template: - settings: - index: - number_of_replicas: 0 - 
composed_of: - - "logs-azure.firewall_logs@package" - - "logs-azure.firewall_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_identity_protection: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.identity_protection-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.identity_protection@package" - - "logs-azure.identity_protection@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_platformlogs: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.platformlogs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.platformlogs@package" - - "logs-azure.platformlogs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_provisioning: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.provisioning-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.provisioning@package" - - "logs-azure.provisioning@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_signinlogs: - index_sorting: False - index_template: - index_patterns: - - "logs-azure.signinlogs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.signinlogs@package" - - "logs-azure.signinlogs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-azure_x_springcloudlogs: - index_sorting: False - index_template: - index_patterns: - - 
"logs-azure.springcloudlogs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-azure.springcloudlogs@package" - - "logs-azure.springcloudlogs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-barracuda_x_waf: - index_sorting: False - index_template: - index_patterns: - - "logs-barracuda.waf-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-barracuda.waf@package" - - "logs-barracuda.waf@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_asa_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_asa.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cisco_asa.log@package" - - "logs-cisco_asa.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cloudflare_x_audit: - index_sorting: False - index_template: - index_patterns: - - "logs-cloudflare.audit-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cloudflare.audit@package" - - "logs-cloudflare.audit@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cloudflare_x_logpull: - index_sorting: False - index_template: - index_patterns: - - "logs-cloudflare.logpull-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cloudflare.logpull@package" - - "logs-cloudflare.logpull@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-crowdstrike_x_falcon: - index_sorting: False - index_template: - 
index_patterns: - - "logs-crowdstrike.falcon-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-crowdstrike.falcon@package" - - "logs-crowdstrike.falcon@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-crowdstrike_x_fdr: - index_sorting: False - index_template: - index_patterns: - - "logs-crowdstrike.fdr-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-crowdstrike.fdr@package" - - "logs-crowdstrike.fdr@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-darktrace_x_ai_analyst_alert: - index_sorting: False - index_template: - index_patterns: - - "logs-darktrace.ai_analyst_alert-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-darktrace.ai_analyst_alert@package" - - "logs-darktrace.ai_analyst_alert@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-darktrace_x_model_breach_alert: - index_sorting: False - index_template: - index_patterns: - - "logs-darktrace.model_breach_alert-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-darktrace.model_breach_alert@package" - - "logs-darktrace.model_breach_alert@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-darktrace_x_system_status_alert: - index_sorting: False - index_template: - index_patterns: - - "logs-darktrace.system_status_alert-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-darktrace.system_status_alert@package" - - "logs-darktrace.system_status_alert@custom" - - "so-fleet_globals-1" - - 
"so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-f5_bigip_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-f5_bigip.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-f5_bigip.log@package" - - "logs-f5_bigip.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-fim_x_event: - index_sorting: False - index_template: - index_patterns: - - "logs-fim.event-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-fim.event@package" - - "logs-fim.event@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-fortinet_x_clientendpoint: - index_sorting: False - index_template: - index_patterns: - - "logs-fortinet.clientendpoint-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-fortinet.clientendpoint@package" - - "logs-fortinet.clientendpoint@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-fortinet_x_firewall: - index_sorting: False - index_template: - index_patterns: - - "logs-fortinet.firewall-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-fortinet.firewall@package" - - "logs-fortinet.firewall@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-fortinet_x_fortimail: - index_sorting: False - index_template: - index_patterns: - - "logs-fortinet.fortimail-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-fortinet.fortimail@package" - - "logs-fortinet.fortimail@custom" - - 
"so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-fortinet_x_fortimanager: - index_sorting: False - index_template: - index_patterns: - - "logs-fortinet.fortimanager-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-fortinet.fortimanager@package" - - "logs-fortinet.fortimanager@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-fortinet_fortigate_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-fortinet_fortigate.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-fortinet_fortigate.log@package" - - "logs-fortinet_fortigate.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-gcp_x_audit: - index_sorting: False - index_template: - index_patterns: - - "logs-gcp.audit-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-gcp.audit@package" - - "logs-gcp.audit@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-gcp_x_dns: - index_sorting: False - index_template: - index_patterns: - - "logs-gcp.dns-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-gcp.dns@package" - - "logs-gcp.dns@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-gcp_x_firewall: - index_sorting: False - index_template: - index_patterns: - - "logs-gcp.firewall-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-gcp.firewall@package" - - "logs-gcp.firewall@custom" - - "so-fleet_globals-1" 
- - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-gcp_x_loadbalancing_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-gcp.loadbalancing_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-gcp.loadbalancing_logs@package" - - "logs-gcp.loadbalancing_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-gcp_x_vpcflow: - index_sorting: False - index_template: - index_patterns: - - "logs-gcp.vpcflow-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-gcp.vpcflow@package" - - "logs-gcp.vpcflow@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-github_x_audit: - index_sorting: False - index_template: - index_patterns: - - "logs-github.audit-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-github.audit@package" - - "logs-github.audit@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-github_x_code_scanning: - index_sorting: False - index_template: - index_patterns: - - "logs-github.code_scanning-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-github.code_scanning@package" - - "logs-github.code_scanning@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-github_x_dependabot: - index_sorting: False - index_template: - index_patterns: - - "logs-github.dependabot-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-github.dependabot@package" - - "logs-github.dependabot@custom" - 
- "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-github_x_issues: - index_sorting: False - index_template: - index_patterns: - - "logs-github.issues-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-github.issues@package" - - "logs-github.issues@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-github_x_secret_scanning: - index_sorting: False - index_template: - index_patterns: - - "logs-github.secret_scanning-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-github.secret_scanning@package" - - "logs-github.secret_scanning@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_access_transparency: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.access_transparency-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.access_transparency@package" - - "logs-google_workspace.access_transparency@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_admin: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.admin-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.admin@package" - - "logs-google_workspace.admin@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_alert: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.alert-*" - 
template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.alert@package" - - "logs-google_workspace.alert@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_context_aware_access: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.context_aware_access-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.context_aware_access@package" - - "logs-google_workspace.context_aware_access@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_device: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.device-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.device@package" - - "logs-google_workspace.device@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_drive: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.drive-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.drive@package" - - "logs-google_workspace.drive@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_gcp: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.gcp-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.gcp@package" - - "logs-google_workspace.gcp@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - 
data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_group_enterprise: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.group_enterprise-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.group_enterprise@package" - - "logs-google_workspace.group_enterprise@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_groups: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.groups-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.groups@package" - - "logs-google_workspace.groups@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_login: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.login-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.login@package" - - "logs-google_workspace.login@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_rules: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.rules-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.rules@package" - - "logs-google_workspace.rules@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_saml: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.saml-*" - template: - settings: - index: - number_of_replicas: 
0 - composed_of: - - "logs-google_workspace.saml@package" - - "logs-google_workspace.saml@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_token: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.token-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.token@package" - - "logs-google_workspace.token@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-google_workspace_x_user_accounts: - index_sorting: False - index_template: - index_patterns: - - "logs-google_workspace.user_accounts-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-google_workspace.user_accounts@package" - - "logs-google_workspace.user_accounts@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-http_endpoint_x_generic: - index_sorting: False - index_template: - index_patterns: - - "logs-http_endpoint.generic-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-http_endpoint.generic@package" - - "logs-http_endpoint.generic@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-httpjson_x_generic: - index_sorting: False - index_template: - index_patterns: - - "logs-httpjson.generic-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-httpjson.generic@package" - - "logs-httpjson.generic@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-juniper_x_junos: - index_sorting: 
False - index_template: - index_patterns: - - "logs-juniper.junos-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-juniper.junos@package" - - "logs-juniper.junos@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-juniper_x_netscreen: - index_sorting: False - index_template: - index_patterns: - - "logs-juniper.netscreen-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-juniper.netscreen@package" - - "logs-juniper.netscreen@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-juniper_x_srx: - index_sorting: False - index_template: - index_patterns: - - "logs-juniper.srx-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-juniper.srx@package" - - "logs-juniper.srx@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-juniper_srx_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-juniper_srx.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-juniper_srx.log@package" - - "logs-juniper_srx.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-kafka_log_x_generic: - index_sorting: False - index_template: - index_patterns: - - "logs-kafka_log.generic-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-kafka_log.generic@package" - - "logs-kafka_log.generic@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-lastpass_x_detailed_shared_folder: - 
index_sorting: False - index_template: - index_patterns: - - "logs-lastpass.detailed_shared_folder-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-lastpass.detailed_shared_folder@package" - - "logs-lastpass.detailed_shared_folder@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-lastpass_x_event_report: - index_sorting: False - index_template: - index_patterns: - - "logs-lastpass.event_report-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-lastpass.event_report@package" - - "logs-lastpass.event_report@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-lastpass_x_user: - index_sorting: False - index_template: - index_patterns: - - "logs-lastpass.user-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-lastpass.user@package" - - "logs-lastpass.user@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-m365_defender_x_event: - index_sorting: False - index_template: - index_patterns: - - "logs-m365_defender.event-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-m365_defender.event@package" - - "logs-m365_defender.event@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-m365_defender_x_incident: - index_sorting: False - index_template: - index_patterns: - - "logs-m365_defender.incident-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-m365_defender.incident@package" - - "logs-m365_defender.incident@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - 
priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-m365_defender_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-m365_defender.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-m365_defender.log@package" - - "logs-m365_defender.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-microsoft_defender_endpoint_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-microsoft_defender_endpoint.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-microsoft_defender_endpoint.log@package" - - "logs-microsoft_defender_endpoint.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-microsoft_dhcp_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-microsoft_dhcp.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-microsoft_dhcp.log@package" - - "logs-microsoft_dhcp.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-netflow_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-netflow.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-netflow.log@package" - - "logs-netflow.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-o365_x_audit: - index_sorting: False - index_template: - index_patterns: - - "logs-o365.audit-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-o365.audit@package" - - "logs-o365.audit@custom" - - 
"so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-okta_x_system: - index_sorting: False - index_template: - index_patterns: - - "logs-okta.system-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-okta.system@package" - - "logs-okta.system@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-panw_x_panos: - index_sorting: False - index_template: - index_patterns: - - "logs-panw.panos-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-panw.panos@package" - - "logs-panw.panos@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-pfsense_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-pfsense.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-pfsense.log@package" - - "logs-pfsense.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sentinel_one_x_activity: - index_sorting: False - index_template: - index_patterns: - - "logs-sentinel_one.activity-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sentinel_one.activity@package" - - "logs-sentinel_one.activity@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sentinel_one_x_agent: - index_sorting: False - index_template: - index_patterns: - - "logs-sentinel_one.agent-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sentinel_one.agent@package" - - "logs-sentinel_one.agent@custom" - - "so-fleet_globals-1" 
- - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sentinel_one_x_alert: - index_sorting: False - index_template: - index_patterns: - - "logs-sentinel_one.alert-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sentinel_one.alert@package" - - "logs-sentinel_one.alert@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sentinel_one_x_group: - index_sorting: False - index_template: - index_patterns: - - "logs-sentinel_one.group-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sentinel_one.group@package" - - "logs-sentinel_one.group@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sentinel_one_x_threat: - index_sorting: False - index_template: - index_patterns: - - "logs-sentinel_one.threat-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sentinel_one.threat@package" - - "logs-sentinel_one.threat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sonicwall_firewall_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-sonicwall_firewall.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sonicwall_firewall.log@package" - - "logs-sonicwall_firewall.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-symantec_endpoint_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-symantec_endpoint.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - 
"logs-symantec_endpoint.log@package" - - "logs-symantec_endpoint.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_abusech_x_malware: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_abusech.malware-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_abusech.malware@package" - - "logs-ti_abusech.malware@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_abusech_x_malwarebazaar: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_abusech.malwarebazaar-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_abusech.malwarebazaar@package" - - "logs-ti_abusech.malwarebazaar@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_abusech_x_threatfox: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_abusech.threatfox-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_abusech.threatfox@package" - - "logs-ti_abusech.threatfox@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_abusech_x_url: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_abusech.url-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_abusech.url@package" - - "logs-ti_abusech.url@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_misp_x_threat: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_misp.threat-*" - 
template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_misp.threat@package" - - "logs-ti_misp.threat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_misp_x_threat_attributes: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_misp.threat_attributes-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_misp.threat_attributes@package" - - "logs-ti_misp.threat_attributes@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_otx_x_threat: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_otx.threat-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_otx.threat@package" - - "logs-ti_otx.threat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_recordedfuture_x_latest_ioc-template: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_recordedfuture.latest_ioc-template-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_recordedfuture.latest_ioc-template@package" - - "logs-ti_recordedfuture.latest_ioc-template@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-ti_recordedfuture_x_threat: - index_sorting: False - index_template: - index_patterns: - - "logs-ti_recordedfuture.threat-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-ti_recordedfuture.threat@package" - - "logs-ti_recordedfuture.threat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false 
- allow_custom_routing: false - so-logs-zscaler_zia_x_alerts: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zia.alerts-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zia.alerts@package" - - "logs-zscaler_zia.alerts@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zia_x_dns: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zia.dns-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zia.dns@package" - - "logs-zscaler_zia.dns@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zia_x_firewall: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zia.firewall-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zia.firewall@package" - - "logs-zscaler_zia.firewall@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zia_x_tunnel: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zia.tunnel-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zia.tunnel@package" - - "logs-zscaler_zia.tunnel@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zia_x_web: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zia.web-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zia.web@package" - - "logs-zscaler_zia.web@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - 
priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zpa_x_app_connector_status: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zpa.app_connector_status-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zpa.app_connector_status@package" - - "logs-zscaler_zpa.app_connector_status@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zpa_x_audit: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zpa.audit-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zpa.audit@package" - - "logs-zscaler_zpa.audit@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zpa_x_browser_access: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zpa.browser_access-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zpa.browser_access@package" - - "logs-zscaler_zpa.browser_access@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zpa_x_user_activity: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zpa.user_activity-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-zscaler_zpa.user_activity@package" - - "logs-zscaler_zpa.user_activity@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-zscaler_zpa_x_user_status: - index_sorting: False - index_template: - index_patterns: - - "logs-zscaler_zpa.user_status-*" - template: - settings: - index: - 
number_of_replicas: 0 - composed_of: - - "logs-zscaler_zpa.user_status@package" - - "logs-zscaler_zpa.user_status@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-1password_x_item_usages: - index_sorting: False - index_template: - index_patterns: - - "logs-1password.item_usages-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-1password.item_usages@package" - - "logs-1password.item_usages@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-1password_x_signin_attempts: - index_sorting: False - index_template: - index_patterns: - - "logs-1password.signin_attempts-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-1password.signin_attempts@package" - - "logs-1password.signin_attempts@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-osquery-manager-actions: - index_sorting: False - index_template: - index_patterns: - - ".logs-osquery_manager.actions*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-osquery_manager.actions" - priority: 501 - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-osquery-manager-action_x_responses: - index_sorting: False - index_template: - index_patterns: - - ".logs-osquery_manager.action.responses*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-osquery_manager.action.responses" - priority: 501 - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_apm_server: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.apm_server-*" - template: - settings: - 
index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - mappings: - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - composed_of: - - "logs-elastic_agent.apm_server@package" - - "logs-elastic_agent.apm_server@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: + min_age: 365d hot: - min_age: 0ms actions: - set_priority: - priority: 100 rollover: max_age: 30d max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_auditbeat: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.auditbeat-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - mappings: - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - composed_of: - - "logs-elastic_agent.auditbeat@package" - - "logs-elastic_agent.auditbeat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: set_priority: priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_cloudbeat: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.cloudbeat-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - 
sort: - field: "@timestamp" - order: desc - mappings: - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - composed_of: - - "logs-elastic_agent.cloudbeat@package" - - "logs-elastic_agent.cloudbeat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - policy: - phases: - hot: min_age: 0ms + warm: actions: set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: + priority: 50 min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_endpoint_security: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.endpoint_security-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-elastic_agent.endpoint_security@package" - - "logs-elastic_agent.endpoint_security@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-endpoint_x_alerts: - index_sorting: False - index_template: - index_patterns: - - "logs-endpoint.alerts-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-endpoint.alerts@custom" - - "logs-endpoint.alerts@package" - - "so-fleet_globals-1" - - 
"so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-endpoint_x_events_x_api: - index_sorting: False - index_template: - index_patterns: - - "logs-endpoint.events.api-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-endpoint.events.api@custom" - - "logs-endpoint.events.api@package" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-endpoint_x_events_x_file: - index_sorting: False - index_template: - index_patterns: - - "logs-endpoint.events.file-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-endpoint.events.file@custom" - - "logs-endpoint.events.file@package" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - 
cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-endpoint_x_events_x_library: - index_sorting: False - index_template: - index_patterns: - - "logs-endpoint.events.library-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-endpoint.events.library@custom" - - "logs-endpoint.events.library@package" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-endpoint_x_events_x_network: - index_sorting: False - index_template: - index_patterns: - - "logs-endpoint.events.network-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-endpoint.events.network@custom" - - "logs-endpoint.events.network@package" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - 
so-logs-endpoint_x_events_x_process: - index_sorting: False - index_template: - index_patterns: - - "logs-endpoint.events.process-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-endpoint.events.process@custom" - - "logs-endpoint.events.process@package" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-endpoint_x_events_x_registry: - index_sorting: False - index_template: - index_patterns: - - "logs-endpoint.events.registry-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-endpoint.events.registry@custom" - - "logs-endpoint.events.registry@package" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-endpoint_x_events_x_security: - index_sorting: False - index_template: - index_patterns: - - "logs-endpoint.events.security-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 
5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-endpoint.events.security@custom" - - "logs-endpoint.events.security@package" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_filebeat: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.filebeat-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-elastic_agent.filebeat@package" - - "logs-elastic_agent.filebeat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_fleet_server: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.fleet_server-*" - template: - settings: - index: - number_of_replicas: 0 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-elastic_agent.fleet_server@package" - - "logs-elastic_agent.fleet_server@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - 
hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_heartbeat: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.heartbeat-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - mappings: - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - composed_of: - - "logs-elastic_agent.heartbeat@package" - - "logs-elastic_agent.heartbeat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - mappings: - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - composed_of: - - "event-mappings" - - "logs-elastic_agent@package" - - "logs-elastic_agent@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - 
max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_metricbeat: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.metricbeat-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-elastic_agent.metricbeat@package" - - "logs-elastic_agent.metricbeat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - so-logs-elastic_agent_x_osquerybeat: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.osquerybeat-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - composed_of: - - "event-mappings" - - "logs-elastic_agent.osquerybeat@package" - - "logs-elastic_agent.osquerybeat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: 
true - so-logs-elastic_agent_x_packetbeat: - index_sorting: False - index_template: - index_patterns: - - "logs-elastic_agent.packetbeat-*" - template: - settings: - index: - number_of_replicas: 0 - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - mappings: - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true - composed_of: - - "logs-elastic_agent.packetbeat@package" - - "logs-elastic_agent.packetbeat@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - policy: - phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb - cold: - min_age: 30d - actions: - set_priority: - priority: 0 - delete: - min_age: 365d - actions: - delete: {} - _meta: - package: - name: elastic_agent - managed_by: security_onion - managed: true so-case: - index_sorting: False + index_sorting: false index_template: + composed_of: + - case-mappings + - case-settings index_patterns: - - so-case* - template: - mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string - date_detection: false - settings: - index: - mapping: - total_fields: - limit: 1500 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 - number_of_replicas: 0 - composed_of: - - case-mappings - - case-settings + - so-case* priority: 500 - so-common: - warm: 7 - close: 30 - delete: 365 - index_sorting: False - index_template: - data_stream: {} - index_patterns: - - logs-*-so* template: mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string date_detection: false - settings: - index: - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 
- number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - registry-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - syslog-mappings - - dtc-syslog-mappings - - threat-mappings - - tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - common-settings - - common-dynamic-mappings - - winlog-mappings - priority: 1 - so-endgame: - index_sorting: False - index_template: - index_patterns: - - endgame* - template: - mappings: dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string - date_detection: false - settings: - index: - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 - number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - 
destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - endgame-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - registry-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - threat-mappings - - tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - common-settings - - common-dynamic-mappings - - winlog-mappings - priority: 500 - so-idh: - warm: 7 - close: 30 - delete: 365 - index_sorting: False - index_template: - index_patterns: - - so-idh-* - template: - mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string - date_detection: false - settings: - index: - mapping: - total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 - number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - container-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - 
dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - threat-mappings - - tls-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - common-settings - - common-dynamic-mappings - priority: 500 - so-suricata: - index_sorting: False - index_template: - data_stream: {} - index_patterns: - - logs-suricata-so* - template: - mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string - date_detection: false + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string settings: index: lifecycle: - name: so-suricata-logs + name: so-case-logs mapping: total_fields: - limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 + limit: 1500 number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - 
dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - registry-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - suricata-mappings - - threat-mappings - - tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - common-settings - - common-dynamic-mappings - priority: 500 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} - so-import: - index_sorting: False + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-common: + close: 30 + delete: 365 + index_sorting: false index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + 
- organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - syslog-mappings + - dtc-syslog-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings + - winlog-mappings data_stream: {} index_patterns: - - logs-import-so* + - logs-*-so* + priority: 1 template: mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string settings: index: + lifecycle: + name: so-common-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + warm: 7 + so-endgame: + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - endgame-mappings + - error-mappings + - event-mappings + - 
dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings + - winlog-mappings + index_patterns: + - endgame* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + lifecycle: + name: so-endgame-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-idh: + close: 30 + delete: 365 + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - container-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - 
dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - common-settings + - common-dynamic-mappings + index_patterns: + - so-idh-* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + lifecycle: + name: so-idh-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + warm: 7 + so-import: + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - 
error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings + - winlog-mappings + data_stream: {} + index_patterns: + - logs-import-so* + priority: 500 + template: + mappings: + date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + final_pipeline: .fleet_final_pipeline-1 lifecycle: name: so-import-logs mapping: total_fields: limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 number_of_replicas: 0 - final_pipeline: ".fleet_final_pipeline-1" - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - 
log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - registry-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - threat-mappings - - tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - common-settings - - common-dynamic-mappings - - winlog-mappings - priority: 500 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-kratos: - warm: 7 close: 30 delete: 365 - index_sorting: False + index_sorting: false index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - container-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - 
dtc-observer-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - common-settings + - common-dynamic-mappings data_stream: - hidden: false allow_custom_routing: false + hidden: false index_patterns: - - logs-kratos-so* + - logs-kratos-so* + priority: 500 template: mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string settings: index: + lifecycle: + name: so-kratos-logs mapping: total_fields: limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - container-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - 
source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - threat-mappings - - tls-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - common-settings - - common-dynamic-mappings - priority: 500 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} - so-logstash: - index_sorting: False + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + warm: 7 + so-logs: + index_sorting: false index_template: + composed_of: + - so-data-streams-mappings + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + - so-logs-mappings + - so-logs-settings + data_stream: + allow_custom_routing: false + hidden: false index_patterns: - - logs-logstash-default* + - logs-*-* + priority: 225 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-logs + mapping: + total_fields: + limit: 5001 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-1password_x_item_usages: + index_sorting: false + index_template: + composed_of: + - logs-1password.item_usages@package + - 
logs-1password.item_usages@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-1password.item_usages-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-1password.item_usages-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-1password_x_signin_attempts: + index_sorting: false + index_template: + composed_of: + - logs-1password.signin_attempts@package + - logs-1password.signin_attempts@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-1password.signin_attempts-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-1password.signin_attempts-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-apache_x_access: + index_sorting: false + index_template: + composed_of: + - logs-apache.access@package + - logs-apache.access@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-apache.access-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-apache.access-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + 
actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-apache_x_error: + index_sorting: false + index_template: + composed_of: + - logs-apache.error@package + - logs-apache.error@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-apache.error-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-apache.error-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-auditd_x_log: + index_sorting: false + index_template: + composed_of: + - logs-auditd.log@package + - logs-auditd.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-auditd.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-auditd.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-auth0_x_logs: + index_sorting: false + index_template: + composed_of: + - logs-auth0.logs@package + - logs-auth0.logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - 
logs-auth0.logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-auth0.logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_cloudtrail: + index_sorting: false + index_template: + composed_of: + - logs-aws.cloudtrail@package + - logs-aws.cloudtrail@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.cloudtrail-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.cloudtrail-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_cloudwatch_logs: + index_sorting: false + index_template: + composed_of: + - logs-aws.cloudwatch_logs@package + - logs-aws.cloudwatch_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.cloudwatch_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.cloudwatch_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + 
so-logs-aws_x_ec2_logs: + index_sorting: false + index_template: + composed_of: + - logs-aws.ec2_logs@package + - logs-aws.ec2_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.ec2_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.ec2_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_elb_logs: + index_sorting: false + index_template: + composed_of: + - logs-aws.elb_logs@package + - logs-aws.elb_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.elb_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.elb_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_firewall_logs: + index_sorting: false + index_template: + composed_of: + - logs-aws.firewall_logs@package + - logs-aws.firewall_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.firewall_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.firewall_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + 
priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_route53_public_logs: + index_sorting: false + index_template: + composed_of: + - logs-aws.route53_public_logs@package + - logs-aws.route53_public_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.route53_public_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.route53_public_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_route53_resolver_logs: + index_sorting: false + index_template: + composed_of: + - logs-aws.route53_resolver_logs@package + - logs-aws.route53_resolver_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.route53_resolver_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.route53_resolver_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_s3access: + index_sorting: false + index_template: + composed_of: + - logs-aws.s3access@package + - 
logs-aws.s3access@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.s3access-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.s3access-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_vpcflow: + index_sorting: false + index_template: + composed_of: + - logs-aws.vpcflow@package + - logs-aws.vpcflow@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.vpcflow-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.vpcflow-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-aws_x_waf: + index_sorting: false + index_template: + composed_of: + - logs-aws.waf@package + - logs-aws.waf@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-aws.waf-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-aws.waf-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + 
priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_activitylogs: + index_sorting: false + index_template: + composed_of: + - logs-azure.activitylogs@package + - logs-azure.activitylogs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.activitylogs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.activitylogs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_application_gateway: + index_sorting: false + index_template: + composed_of: + - logs-azure.application_gateway@package + - logs-azure.application_gateway@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.application_gateway-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.application_gateway-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_auditlogs: + index_sorting: false + index_template: + composed_of: + - logs-azure.auditlogs@package + - logs-azure.auditlogs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.auditlogs-* + 
priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.auditlogs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_eventhub: + index_sorting: false + index_template: + composed_of: + - logs-azure.eventhub@package + - logs-azure.eventhub@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.eventhub-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.eventhub-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_firewall_logs: + index_sorting: false + index_template: + composed_of: + - logs-azure.firewall_logs@package + - logs-azure.firewall_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.firewall_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.firewall_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + 
so-logs-azure_x_identity_protection: + index_sorting: false + index_template: + composed_of: + - logs-azure.identity_protection@package + - logs-azure.identity_protection@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.identity_protection-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.identity_protection-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_platformlogs: + index_sorting: false + index_template: + composed_of: + - logs-azure.platformlogs@package + - logs-azure.platformlogs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.platformlogs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.platformlogs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_provisioning: + index_sorting: false + index_template: + composed_of: + - logs-azure.provisioning@package + - logs-azure.provisioning@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.provisioning-* + priority: 501 + template: + settings: + index: + lifecycle: + name: 
so-logs-azure.provisioning-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_signinlogs: + index_sorting: false + index_template: + composed_of: + - logs-azure.signinlogs@package + - logs-azure.signinlogs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.signinlogs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.signinlogs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-azure_x_springcloudlogs: + index_sorting: false + index_template: + composed_of: + - logs-azure.springcloudlogs@package + - logs-azure.springcloudlogs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-azure.springcloudlogs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-azure.springcloudlogs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-barracuda_x_waf: + index_sorting: false + 
index_template: + composed_of: + - logs-barracuda.waf@package + - logs-barracuda.waf@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-barracuda.waf-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-barracuda.waf-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-carbonblack_edr_x_log: + index_sorting: false + index_template: + composed_of: + - logs-carbonblack_edr.log@package + - logs-carbonblack_edr.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-carbonblack_edr.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-carbonblack_edr.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_asa_x_log: + index_sorting: false + index_template: + composed_of: + - logs-cisco_asa.log@package + - logs-cisco_asa.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cisco_asa.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_asa.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + 
delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_duo_x_admin: + index_sorting: false + index_template: + composed_of: + - logs-cisco_duo.admin@package + - logs-cisco_duo.admin@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cisco_duo.admin-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_duo.admin-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_duo_x_auth: + index_sorting: false + index_template: + composed_of: + - logs-cisco_duo.auth@package + - logs-cisco_duo.auth@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cisco_duo.auth-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_duo.auth-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_duo_x_offline_enrollment: + index_sorting: false + index_template: + composed_of: + - logs-cisco_duo.offline_enrollment@package + - logs-cisco_duo.offline_enrollment@custom + - so-fleet_globals-1 + - 
so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cisco_duo.offline_enrollment-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_duo.offline_enrollment-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_duo_x_summary: + index_sorting: false + index_template: + composed_of: + - logs-cisco_duo.summary@package + - logs-cisco_duo.summary@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cisco_duo.summary-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_duo.summary-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_duo_x_telephony: + index_sorting: false + index_template: + composed_of: + - logs-cisco_duo.telephony@package + - logs-cisco_duo.telephony@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cisco_duo.telephony-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_duo.telephony-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + 
rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_meraki_x_events: + index_sorting: false + index_template: + composed_of: + - logs-cisco_meraki.events@package + - logs-cisco_meraki.events@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cisco_meraki.events-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_meraki.events-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_meraki_x_log: + index_sorting: false + index_template: + composed_of: + - logs-cisco_meraki.log@package + - logs-cisco_meraki.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cisco_meraki.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_meraki.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cisco_umbrella_x_log: + index_sorting: false + index_template: + composed_of: + - logs-cisco_umbrella.log@package + - logs-cisco_umbrella.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + 
index_patterns: + - logs-cisco_umbrella.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cisco_umbrella.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cloudflare_x_audit: + index_sorting: false + index_template: + composed_of: + - logs-cloudflare.audit@package + - logs-cloudflare.audit@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cloudflare.audit-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cloudflare.audit-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-cloudflare_x_logpull: + index_sorting: false + index_template: + composed_of: + - logs-cloudflare.logpull@package + - logs-cloudflare.logpull@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-cloudflare.logpull-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-cloudflare.logpull-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + 
set_priority: + priority: 50 + min_age: 30d + so-logs-crowdstrike_x_falcon: + index_sorting: false + index_template: + composed_of: + - logs-crowdstrike.falcon@package + - logs-crowdstrike.falcon@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-crowdstrike.falcon-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-crowdstrike.falcon-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-crowdstrike_x_fdr: + index_sorting: false + index_template: + composed_of: + - logs-crowdstrike.fdr@package + - logs-crowdstrike.fdr@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-crowdstrike.fdr-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-crowdstrike.fdr-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-darktrace_x_ai_analyst_alert: + index_sorting: false + index_template: + composed_of: + - logs-darktrace.ai_analyst_alert@package + - logs-darktrace.ai_analyst_alert@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-darktrace.ai_analyst_alert-* + priority: 501 + template: + settings: + index: + 
lifecycle: + name: so-logs-darktrace.ai_analyst_alert-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-darktrace_x_model_breach_alert: + index_sorting: false + index_template: + composed_of: + - logs-darktrace.model_breach_alert@package + - logs-darktrace.model_breach_alert@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-darktrace.model_breach_alert-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-darktrace.model_breach_alert-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-darktrace_x_system_status_alert: + index_sorting: false + index_template: + composed_of: + - logs-darktrace.system_status_alert@package + - logs-darktrace.system_status_alert@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-darktrace.system_status_alert-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-darktrace.system_status_alert-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + 
warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-elastic_agent@package + - logs-elastic_agent@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent-* + priority: 501 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-elastic_agent-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_apm_server: + index_sorting: false + index_template: + composed_of: + - logs-elastic_agent.apm_server@package + - logs-elastic_agent.apm_server@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent.apm_server-* + priority: 501 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-elastic_agent.apm_server-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + 
actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_auditbeat: + index_sorting: false + index_template: + composed_of: + - logs-elastic_agent.auditbeat@package + - logs-elastic_agent.auditbeat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent.auditbeat-* + priority: 501 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-elastic_agent.auditbeat-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_cloudbeat: + index_sorting: false + index_template: + composed_of: + - logs-elastic_agent.cloudbeat@package + - logs-elastic_agent.cloudbeat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + index_patterns: + - logs-elastic_agent.cloudbeat-* + priority: 501 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-elastic_agent.cloudbeat-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + 
priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_endpoint_security: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-elastic_agent.endpoint_security@package + - logs-elastic_agent.endpoint_security@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent.endpoint_security-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-elastic_agent.endpoint_security-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_filebeat: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-elastic_agent.filebeat@package + - logs-elastic_agent.filebeat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent.filebeat-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-elastic_agent.filebeat-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + 
actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_fleet_server: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-elastic_agent.fleet_server@package + - logs-elastic_agent.fleet_server@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent.fleet_server-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-elastic_agent.fleet_server-logs + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_heartbeat: + index_sorting: false + index_template: + composed_of: + - logs-elastic_agent.heartbeat@package + - logs-elastic_agent.heartbeat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + index_patterns: + - logs-elastic_agent.heartbeat-* + priority: 501 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-elastic_agent.heartbeat-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + 
set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_metricbeat: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-elastic_agent.metricbeat@package + - logs-elastic_agent.metricbeat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent.metricbeat-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-elastic_agent.metricbeat-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_osquerybeat: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-elastic_agent.osquerybeat@package + - logs-elastic_agent.osquerybeat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent.osquerybeat-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-elastic_agent.osquerybeat-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + 
set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-elastic_agent_x_packetbeat: + index_sorting: false + index_template: + composed_of: + - logs-elastic_agent.packetbeat@package + - logs-elastic_agent.packetbeat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-elastic_agent.packetbeat-* + priority: 501 + template: + mappings: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + settings: + index: + lifecycle: + name: so-logs-elastic_agent.packetbeat-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-endpoint_x_alerts: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.alerts@custom + - logs-endpoint.alerts@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.alerts-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-endpoint.alerts-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + 
phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-endpoint_x_events_x_api: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.events.api@custom + - logs-endpoint.events.api@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.events.api-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-endpoint.events.api-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-endpoint_x_events_x_file: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.events.file@custom + - logs-endpoint.events.file@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.events.file-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-endpoint.events.file-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + 
priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-endpoint_x_events_x_library: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.events.library@custom + - logs-endpoint.events.library@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.events.library-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-endpoint.events.library-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-endpoint_x_events_x_network: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.events.network@custom + - logs-endpoint.events.network@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.events.network-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-endpoint.events.network-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 
+ min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-endpoint_x_events_x_process: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.events.process@custom + - logs-endpoint.events.process@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.events.process-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-endpoint.events.process-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-endpoint_x_events_x_registry: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.events.registry@custom + - logs-endpoint.events.registry@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.events.registry-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-endpoint.events.registry-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + 
min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-endpoint_x_events_x_security: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-endpoint.events.security@custom + - logs-endpoint.events.security@package + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-endpoint.events.security-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-endpoint.events.security-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + sort: + field: '@timestamp' + order: desc + policy: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-f5_bigip_x_log: + index_sorting: false + index_template: + composed_of: + - logs-f5_bigip.log@package + - logs-f5_bigip.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-f5_bigip.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-f5_bigip.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + 
so-logs-fim_x_event: + index_sorting: false + index_template: + composed_of: + - logs-fim.event@package + - logs-fim.event@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-fim.event-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-fim.event-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-fireeye_x_nx: + index_sorting: false + index_template: + composed_of: + - logs-fireeye.nx@package + - logs-fireeye.nx@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-fireeye.nx-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-fireeye.nx-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-fortinet_fortigate_x_log: + index_sorting: false + index_template: + composed_of: + - logs-fortinet_fortigate.log@package + - logs-fortinet_fortigate.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-fortinet_fortigate.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-fortinet_fortigate.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + 
priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-fortinet_x_clientendpoint: + index_sorting: false + index_template: + composed_of: + - logs-fortinet.clientendpoint@package + - logs-fortinet.clientendpoint@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-fortinet.clientendpoint-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-fortinet.clientendpoint-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-fortinet_x_firewall: + index_sorting: false + index_template: + composed_of: + - logs-fortinet.firewall@package + - logs-fortinet.firewall@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-fortinet.firewall-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-fortinet.firewall-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-fortinet_x_fortimail: + index_sorting: false + index_template: + composed_of: + - logs-fortinet.fortimail@package + - logs-fortinet.fortimail@custom + - 
so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-fortinet.fortimail-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-fortinet.fortimail-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-fortinet_x_fortimanager: + index_sorting: false + index_template: + composed_of: + - logs-fortinet.fortimanager@package + - logs-fortinet.fortimanager@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-fortinet.fortimanager-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-fortinet.fortimanager-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-gcp_x_audit: + index_sorting: false + index_template: + composed_of: + - logs-gcp.audit@package + - logs-gcp.audit@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-gcp.audit-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-gcp.audit-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + 
max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-gcp_x_dns: + index_sorting: false + index_template: + composed_of: + - logs-gcp.dns@package + - logs-gcp.dns@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-gcp.dns-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-gcp.dns-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-gcp_x_firewall: + index_sorting: false + index_template: + composed_of: + - logs-gcp.firewall@package + - logs-gcp.firewall@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-gcp.firewall-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-gcp.firewall-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-gcp_x_loadbalancing_logs: + index_sorting: false + index_template: + composed_of: + - logs-gcp.loadbalancing_logs@package + - logs-gcp.loadbalancing_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-gcp.loadbalancing_logs-* + priority: 501 + template: + settings: + 
index: + lifecycle: + name: so-logs-gcp.loadbalancing_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-gcp_x_vpcflow: + index_sorting: false + index_template: + composed_of: + - logs-gcp.vpcflow@package + - logs-gcp.vpcflow@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-gcp.vpcflow-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-gcp.vpcflow-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-github_x_audit: + index_sorting: false + index_template: + composed_of: + - logs-github.audit@package + - logs-github.audit@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-github.audit-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-github.audit-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-github_x_code_scanning: + index_sorting: false + index_template: + composed_of: + - 
logs-github.code_scanning@package + - logs-github.code_scanning@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-github.code_scanning-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-github.code_scanning-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-github_x_dependabot: + index_sorting: false + index_template: + composed_of: + - logs-github.dependabot@package + - logs-github.dependabot@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-github.dependabot-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-github.dependabot-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-github_x_issues: + index_sorting: false + index_template: + composed_of: + - logs-github.issues@package + - logs-github.issues@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-github.issues-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-github.issues-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + 
delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-github_x_secret_scanning: + index_sorting: false + index_template: + composed_of: + - logs-github.secret_scanning@package + - logs-github.secret_scanning@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-github.secret_scanning-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-github.secret_scanning-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_access_transparency: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.access_transparency@package + - logs-google_workspace.access_transparency@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.access_transparency-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.access_transparency-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_admin: + index_sorting: false + index_template: + composed_of: + - 
logs-google_workspace.admin@package + - logs-google_workspace.admin@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.admin-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.admin-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_alert: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.alert@package + - logs-google_workspace.alert@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.alert-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.alert-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_context_aware_access: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.context_aware_access@package + - logs-google_workspace.context_aware_access@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.context_aware_access-* + priority: 501 + template: + settings: + index: + lifecycle: + name: 
so-logs-google_workspace.context_aware_access-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_device: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.device@package + - logs-google_workspace.device@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.device-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.device-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_drive: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.drive@package + - logs-google_workspace.drive@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.drive-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.drive-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + 
so-logs-google_workspace_x_gcp: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.gcp@package + - logs-google_workspace.gcp@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.gcp-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.gcp-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_group_enterprise: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.group_enterprise@package + - logs-google_workspace.group_enterprise@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.group_enterprise-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.group_enterprise-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_groups: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.groups@package + - logs-google_workspace.groups@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.groups-* + priority: 501 
+ template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.groups-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_login: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.login@package + - logs-google_workspace.login@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.login-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.login-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_rules: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.rules@package + - logs-google_workspace.rules@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.rules-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.rules-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: 
+ priority: 50 + min_age: 30d + so-logs-google_workspace_x_saml: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.saml@package + - logs-google_workspace.saml@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.saml-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.saml-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_token: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.token@package + - logs-google_workspace.token@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.token-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.token-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-google_workspace_x_user_accounts: + index_sorting: false + index_template: + composed_of: + - logs-google_workspace.user_accounts@package + - logs-google_workspace.user_accounts@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-google_workspace.user_accounts-* + 
priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-google_workspace.user_accounts-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-http_endpoint_x_generic: + index_sorting: false + index_template: + composed_of: + - logs-http_endpoint.generic@package + - logs-http_endpoint.generic@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-http_endpoint.generic-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-http_endpoint.generic-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-httpjson_x_generic: + index_sorting: false + index_template: + composed_of: + - logs-httpjson.generic@package + - logs-httpjson.generic@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-httpjson.generic-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-httpjson.generic-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 
+ min_age: 30d + so-logs-juniper_srx_x_log: + index_sorting: false + index_template: + composed_of: + - logs-juniper_srx.log@package + - logs-juniper_srx.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-juniper_srx.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-juniper_srx.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-juniper_x_junos: + index_sorting: false + index_template: + composed_of: + - logs-juniper.junos@package + - logs-juniper.junos@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-juniper.junos-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-juniper.junos-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-juniper_x_netscreen: + index_sorting: false + index_template: + composed_of: + - logs-juniper.netscreen@package + - logs-juniper.netscreen@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-juniper.netscreen-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-juniper.netscreen-logs + number_of_replicas: 0 + policy: + phases: + 
cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-juniper_x_srx: + index_sorting: false + index_template: + composed_of: + - logs-juniper.srx@package + - logs-juniper.srx@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-juniper.srx-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-juniper.srx-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-kafka_log_x_generic: + index_sorting: false + index_template: + composed_of: + - logs-kafka_log.generic@package + - logs-kafka_log.generic@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-kafka_log.generic-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-kafka_log.generic-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-lastpass_x_detailed_shared_folder: + index_sorting: false + index_template: + composed_of: + - logs-lastpass.detailed_shared_folder@package + - 
logs-lastpass.detailed_shared_folder@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-lastpass.detailed_shared_folder-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-lastpass.detailed_shared_folder-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-lastpass_x_event_report: + index_sorting: false + index_template: + composed_of: + - logs-lastpass.event_report@package + - logs-lastpass.event_report@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-lastpass.event_report-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-lastpass.event_report-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-lastpass_x_user: + index_sorting: false + index_template: + composed_of: + - logs-lastpass.user@package + - logs-lastpass.user@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-lastpass.user-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-lastpass.user-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + 
delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-m365_defender_x_event: + index_sorting: false + index_template: + composed_of: + - logs-m365_defender.event@package + - logs-m365_defender.event@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-m365_defender.event-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-m365_defender.event-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-m365_defender_x_incident: + index_sorting: false + index_template: + composed_of: + - logs-m365_defender.incident@package + - logs-m365_defender.incident@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-m365_defender.incident-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-m365_defender.incident-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-m365_defender_x_log: + index_sorting: false + index_template: + composed_of: + - logs-m365_defender.log@package + - logs-m365_defender.log@custom + - so-fleet_globals-1 + - 
so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-m365_defender.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-m365_defender.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-microsoft_defender_endpoint_x_log: + index_sorting: false + index_template: + composed_of: + - logs-microsoft_defender_endpoint.log@package + - logs-microsoft_defender_endpoint.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-microsoft_defender_endpoint.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-microsoft_defender_endpoint.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-microsoft_dhcp_x_log: + index_sorting: false + index_template: + composed_of: + - logs-microsoft_dhcp.log@package + - logs-microsoft_dhcp.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-microsoft_dhcp.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-microsoft_dhcp.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: 
{} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-mimecast_x_audit_events: + index_sorting: false + index_template: + composed_of: + - logs-mimecast.audit_events@package + - logs-mimecast.audit_events@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-mimecast.audit_events-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-mimecast.audit_events-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-mimecast_x_dlp_logs: + index_sorting: false + index_template: + composed_of: + - logs-mimecast.dlp_logs@package + - logs-mimecast.dlp_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-mimecast.dlp_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-mimecast.dlp_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-mimecast_x_siem_logs: + index_sorting: false + index_template: + composed_of: + - logs-mimecast.siem_logs@package + - logs-mimecast.siem_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + 
data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-mimecast.siem_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-mimecast.siem_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-mimecast_x_threat_intel_malware_customer: + index_sorting: false + index_template: + composed_of: + - logs-mimecast.threat_intel_malware_customer@package + - logs-mimecast.threat_intel_malware_customer@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-mimecast.threat_intel_malware_customer-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-mimecast.threat_intel_malware_customer-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-mimecast_x_threat_intel_malware_grid: + index_sorting: false + index_template: + composed_of: + - logs-mimecast.threat_intel_malware_grid@package + - logs-mimecast.threat_intel_malware_grid@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-mimecast.threat_intel_malware_grid-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-mimecast.threat_intel_malware_grid-logs + number_of_replicas: 0 + policy: + phases: + cold: + 
actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-mimecast_x_ttp_ap_logs: + index_sorting: false + index_template: + composed_of: + - logs-mimecast.ttp_ap_logs@package + - logs-mimecast.ttp_ap_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-mimecast.ttp_ap_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-mimecast.ttp_ap_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-mimecast_x_ttp_ip_logs: + index_sorting: false + index_template: + composed_of: + - logs-mimecast.ttp_ip_logs@package + - logs-mimecast.ttp_ip_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-mimecast.ttp_ip_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-mimecast.ttp_ip_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-mimecast_x_ttp_url_logs: + index_sorting: false + index_template: + composed_of: + - logs-mimecast.ttp_url_logs@package + - 
logs-mimecast.ttp_url_logs@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-mimecast.ttp_url_logs-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-mimecast.ttp_url_logs-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-netflow_x_log: + index_sorting: false + index_template: + composed_of: + - logs-netflow.log@package + - logs-netflow.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-netflow.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-netflow.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-o365_x_audit: + index_sorting: false + index_template: + composed_of: + - logs-o365.audit@package + - logs-o365.audit@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-o365.audit-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-o365.audit-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + 
max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-okta_x_system: + index_sorting: false + index_template: + composed_of: + - logs-okta.system@package + - logs-okta.system@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-okta.system-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-okta.system-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-osquery-manager-action_x_responses: + index_sorting: false + index_template: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + composed_of: + - logs-osquery_manager.action.responses + index_patterns: + - .logs-osquery_manager.action.responses* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-osquery-manager-action.responses-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-osquery-manager-actions: + index_sorting: false + index_template: + _meta: + managed: true + managed_by: security_onion + package: + name: elastic_agent + composed_of: + - logs-osquery_manager.actions + index_patterns: + - .logs-osquery_manager.actions* + priority: 501 + template: + settings: + index: + lifecycle: + name: 
so-logs-osquery-manager-actions-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-panw_x_panos: + index_sorting: false + index_template: + composed_of: + - logs-panw.panos@package + - logs-panw.panos@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-panw.panos-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-panw.panos-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-pfsense_x_log: + index_sorting: false + index_template: + composed_of: + - logs-pfsense.log@package + - logs-pfsense.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-pfsense.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-pfsense.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-pulse_connect_secure_x_log: + index_sorting: false + index_template: + composed_of: + - logs-pulse_connect_secure.log@package + - 
logs-pulse_connect_secure.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-pulse_connect_secure.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-pulse_connect_secure.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sentinel_one_x_activity: + index_sorting: false + index_template: + composed_of: + - logs-sentinel_one.activity@package + - logs-sentinel_one.activity@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sentinel_one.activity-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sentinel_one.activity-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sentinel_one_x_agent: + index_sorting: false + index_template: + composed_of: + - logs-sentinel_one.agent@package + - logs-sentinel_one.agent@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sentinel_one.agent-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sentinel_one.agent-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + 
delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sentinel_one_x_alert: + index_sorting: false + index_template: + composed_of: + - logs-sentinel_one.alert@package + - logs-sentinel_one.alert@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sentinel_one.alert-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sentinel_one.alert-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sentinel_one_x_group: + index_sorting: false + index_template: + composed_of: + - logs-sentinel_one.group@package + - logs-sentinel_one.group@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sentinel_one.group-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sentinel_one.group-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sentinel_one_x_threat: + index_sorting: false + index_template: + composed_of: + - logs-sentinel_one.threat@package + - logs-sentinel_one.threat@custom + - so-fleet_globals-1 + - 
so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sentinel_one.threat-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sentinel_one.threat-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-snyk_x_audit: + index_sorting: false + index_template: + composed_of: + - logs-snyk.audit@package + - logs-snyk.audit@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-snyk.audit-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-snyk.audit-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-snyk_x_vulnerabilities: + index_sorting: false + index_template: + composed_of: + - logs-snyk.vulnerabilities@package + - logs-snyk.vulnerabilities@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-snyk.vulnerabilities-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-snyk.vulnerabilities-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 
50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sonicwall_firewall_x_log: + index_sorting: false + index_template: + composed_of: + - logs-sonicwall_firewall.log@package + - logs-sonicwall_firewall.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sonicwall_firewall.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sonicwall_firewall.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sophos_central_x_alert: + index_sorting: false + index_template: + composed_of: + - logs-sophos_central.alert@package + - logs-sophos_central.alert@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sophos_central.alert-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sophos_central.alert-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sophos_central_x_event: + index_sorting: false + index_template: + composed_of: + - logs-sophos_central.event@package + - logs-sophos_central.event@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + 
index_patterns: + - logs-sophos_central.event-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sophos_central.event-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sophos_x_utm: + index_sorting: false + index_template: + composed_of: + - logs-sophos.utm@package + - logs-sophos.utm@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sophos.utm-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sophos.utm-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-sophos_x_xg: + index_sorting: false + index_template: + composed_of: + - logs-sophos.xg@package + - logs-sophos.xg@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-sophos.xg-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-sophos.xg-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-symantec_endpoint_x_log: 
+ index_sorting: false + index_template: + composed_of: + - logs-symantec_endpoint.log@package + - logs-symantec_endpoint.log@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-symantec_endpoint.log-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-symantec_endpoint.log-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-system_x_application: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-system.application@package + - logs-system.application@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-system.application* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-system.application-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-system_x_auth: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-system.auth@package + - logs-system.auth@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-system.auth* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-system.auth-logs + number_of_replicas: 0 + policy: + 
phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-system_x_security: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-system.security@package + - logs-system.security@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-system.security* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-system.security-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-system_x_syslog: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-system.syslog@package + - logs-system.syslog@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-system.syslog* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-system.syslog-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-system_x_system: + index_sorting: false + index_template: + composed_of: + - event-mappings + - logs-system.system@package + - 
logs-system.system@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-system.system* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-system.system-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-tenable_sc_x_asset: + index_sorting: false + index_template: + composed_of: + - logs-tenable_sc.asset@package + - logs-tenable_sc.asset@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-tenable_sc.asset-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-tenable_sc.asset-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-tenable_sc_x_plugin: + index_sorting: false + index_template: + composed_of: + - logs-tenable_sc.plugin@package + - logs-tenable_sc.plugin@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-tenable_sc.plugin-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-tenable_sc.plugin-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: 
+ rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-tenable_sc_x_vulnerability: + index_sorting: false + index_template: + composed_of: + - logs-tenable_sc.vulnerability@package + - logs-tenable_sc.vulnerability@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-tenable_sc.vulnerability-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-tenable_sc.vulnerability-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_abusech_x_malware: + index_sorting: false + index_template: + composed_of: + - logs-ti_abusech.malware@package + - logs-ti_abusech.malware@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_abusech.malware-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_abusech.malware-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_abusech_x_malwarebazaar: + index_sorting: false + index_template: + composed_of: + - logs-ti_abusech.malwarebazaar@package + - logs-ti_abusech.malwarebazaar@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + 
data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_abusech.malwarebazaar-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_abusech.malwarebazaar-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_abusech_x_threatfox: + index_sorting: false + index_template: + composed_of: + - logs-ti_abusech.threatfox@package + - logs-ti_abusech.threatfox@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_abusech.threatfox-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_abusech.threatfox-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_abusech_x_url: + index_sorting: false + index_template: + composed_of: + - logs-ti_abusech.url@package + - logs-ti_abusech.url@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_abusech.url-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_abusech.url-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + 
set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_misp_x_threat: + index_sorting: false + index_template: + composed_of: + - logs-ti_misp.threat@package + - logs-ti_misp.threat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_misp.threat-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_misp.threat-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_misp_x_threat_attributes: + index_sorting: false + index_template: + composed_of: + - logs-ti_misp.threat_attributes@package + - logs-ti_misp.threat_attributes@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_misp.threat_attributes-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_misp.threat_attributes-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_otx_x_threat: + index_sorting: false + index_template: + composed_of: + - logs-ti_otx.threat@package + - logs-ti_otx.threat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_otx.threat-* + priority: 
501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_otx.threat-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_recordedfuture_x_latest_ioc-template: + index_sorting: false + index_template: + composed_of: + - logs-ti_recordedfuture.latest_ioc-template@package + - logs-ti_recordedfuture.latest_ioc-template@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_recordedfuture.latest_ioc-template-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_recordedfuture.latest_ioc-template-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-ti_recordedfuture_x_threat: + index_sorting: false + index_template: + composed_of: + - logs-ti_recordedfuture.threat@package + - logs-ti_recordedfuture.threat@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-ti_recordedfuture.threat-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-ti_recordedfuture.threat-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + 
set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-windows_x_forwarded: + index_sorting: false + index_template: + composed_of: + - logs-windows.forwarded@package + - logs-windows.forwarded@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-windows.forwarded* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-windows.forwarded-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-windows_x_powershell: + index_sorting: false + index_template: + composed_of: + - logs-windows.powershell@package + - logs-windows.powershell@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-windows.powershell-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-windows.powershell-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-windows_x_powershell_operational: + index_sorting: false + index_template: + composed_of: + - logs-windows.powershell_operational@package + - logs-windows.powershell_operational@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - 
logs-windows.powershell_operational-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-windows.powershell_operational-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-windows_x_sysmon_operational: + index_sorting: false + index_template: + composed_of: + - logs-windows.sysmon_operational@package + - logs-windows.sysmon_operational@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-windows.sysmon_operational-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-windows.sysmon_operational-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zia_x_alerts: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zia.alerts@package + - logs-zscaler_zia.alerts@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zia.alerts-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-zscaler_zia.alerts-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + 
priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zia_x_dns: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zia.dns@package + - logs-zscaler_zia.dns@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zia.dns-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-zscaler_zia.dns-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zia_x_firewall: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zia.firewall@package + - logs-zscaler_zia.firewall@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zia.firewall-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-zscaler_zia.firewall-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zia_x_tunnel: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zia.tunnel@package + - logs-zscaler_zia.tunnel@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zia.tunnel-* + priority: 501 + template: + 
settings: + index: + lifecycle: + name: so-logs-zscaler_zia.tunnel-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zia_x_web: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zia.web@package + - logs-zscaler_zia.web@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zia.web-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-zscaler_zia.web-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zpa_x_app_connector_status: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zpa.app_connector_status@package + - logs-zscaler_zpa.app_connector_status@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zpa.app_connector_status-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-zscaler_zpa.app_connector_status-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + 
priority: 50 + min_age: 30d + so-logs-zscaler_zpa_x_audit: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zpa.audit@package + - logs-zscaler_zpa.audit@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zpa.audit-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-zscaler_zpa.audit-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zpa_x_browser_access: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zpa.browser_access@package + - logs-zscaler_zpa.browser_access@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zpa.browser_access-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-zscaler_zpa.browser_access-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zpa_x_user_activity: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zpa.user_activity@package + - logs-zscaler_zpa.user_activity@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zpa.user_activity-* + priority: 501 + template: 
+ settings: + index: + lifecycle: + name: so-logs-zscaler_zpa.user_activity-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logs-zscaler_zpa_x_user_status: + index_sorting: false + index_template: + composed_of: + - logs-zscaler_zpa.user_status@package + - logs-zscaler_zpa.user_status@custom + - so-fleet_globals-1 + - so-fleet_agent_id_verification-1 + data_stream: + allow_custom_routing: false + hidden: false + index_patterns: + - logs-zscaler_zpa.user_status-* + priority: 501 + template: + settings: + index: + lifecycle: + name: so-logs-zscaler_zpa.user_status-logs + number_of_replicas: 0 + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-logstash: + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - logstash-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - 
dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings + index_patterns: + - logs-logstash-default* + priority: 500 template: mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string settings: index: lifecycle: @@ -3891,104 +8572,109 @@ elasticsearch: mapping: total_fields: limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - logstash-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - registry-mappings - 
- related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - threat-mappings - - tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - common-settings - - common-dynamic-mappings - priority: 500 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-redis: - index_sorting: False + index_sorting: false index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - redis-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - 
service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings index_patterns: - - logs-redis-default* + - logs-redis-default* + priority: 500 template: mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string settings: index: lifecycle: @@ -3996,315 +8682,447 @@ elasticsearch: mapping: total_fields: limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - redis-mappings - - registry-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - threat-mappings - - 
tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - common-settings - - common-dynamic-mappings - priority: 500 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-strelka: - index_sorting: False + index_sorting: false index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - so-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - so-scan-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - 
user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings data_stream: {} index_patterns: - - logs-strelka-so* + - logs-strelka-so* + priority: 500 template: mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string settings: index: + lifecycle: + name: so-strelka-logs mapping: total_fields: limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - so-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - registry-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - so-scan-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - threat-mappings - - tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - common-settings - - common-dynamic-mappings - 
priority: 500 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-suricata: + index_sorting: false + index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - suricata-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - common-settings + - common-dynamic-mappings + data_stream: {} + index_patterns: + - logs-suricata-so* + priority: 500 + template: + mappings: + 
date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string + settings: + index: + lifecycle: + name: so-suricata-logs + mapping: + total_fields: + limit: 5000 + number_of_replicas: 0 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-syslog: - index_sorting: False + index_sorting: false index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - syslog-mappings + - dtc-syslog-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + 
- common-settings + - common-dynamic-mappings data_stream: {} index_patterns: - - logs-syslog-so* + - logs-syslog-so* + priority: 500 template: mappings: - dynamic_templates: - - strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string settings: index: + lifecycle: + name: so-syslog-logs mapping: total_fields: limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 1 number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - registry-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - syslog-mappings - - dtc-syslog-mappings - - threat-mappings - - tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - common-settings - - common-dynamic-mappings - priority: 500 + number_of_shards: 1 + refresh_interval: 30s + sort: + field: 
'@timestamp' + order: desc policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-zeek: - index_sorting: False + index_sorting: false index_template: + composed_of: + - agent-mappings + - dtc-agent-mappings + - base-mappings + - dtc-base-mappings + - client-mappings + - dtc-client-mappings + - cloud-mappings + - container-mappings + - data_stream-mappings + - destination-mappings + - dtc-destination-mappings + - pb-override-destination-mappings + - dll-mappings + - dns-mappings + - dtc-dns-mappings + - ecs-mappings + - dtc-ecs-mappings + - error-mappings + - event-mappings + - dtc-event-mappings + - file-mappings + - dtc-file-mappings + - group-mappings + - host-mappings + - dtc-host-mappings + - http-mappings + - dtc-http-mappings + - log-mappings + - network-mappings + - dtc-network-mappings + - observer-mappings + - dtc-observer-mappings + - orchestrator-mappings + - organization-mappings + - package-mappings + - process-mappings + - dtc-process-mappings + - registry-mappings + - related-mappings + - rule-mappings + - dtc-rule-mappings + - server-mappings + - service-mappings + - dtc-service-mappings + - source-mappings + - dtc-source-mappings + - pb-override-source-mappings + - syslog-mappings + - dtc-syslog-mappings + - threat-mappings + - tls-mappings + - tracing-mappings + - url-mappings + - user_agent-mappings + - dtc-user_agent-mappings + - vulnerability-mappings + - zeek-mappings + - common-settings + - common-dynamic-mappings data_stream: {} index_patterns: - - logs-zeek-so* + - logs-zeek-so* + priority: 500 template: mappings: - dynamic_templates: - - 
strings_as_keyword: - mapping: - ignore_above: 1024 - type: keyword - match_mapping_type: string date_detection: false + dynamic_templates: + - strings_as_keyword: + mapping: + ignore_above: 1024 + type: keyword + match_mapping_type: string settings: index: lifecycle: @@ -4312,656 +9130,80 @@ elasticsearch: mapping: total_fields: limit: 5000 - sort: - field: "@timestamp" - order: desc - refresh_interval: 30s - number_of_shards: 2 number_of_replicas: 0 - composed_of: - - agent-mappings - - dtc-agent-mappings - - base-mappings - - dtc-base-mappings - - client-mappings - - dtc-client-mappings - - cloud-mappings - - container-mappings - - data_stream-mappings - - destination-mappings - - dtc-destination-mappings - - pb-override-destination-mappings - - dll-mappings - - dns-mappings - - dtc-dns-mappings - - ecs-mappings - - dtc-ecs-mappings - - error-mappings - - event-mappings - - dtc-event-mappings - - file-mappings - - dtc-file-mappings - - group-mappings - - host-mappings - - dtc-host-mappings - - http-mappings - - dtc-http-mappings - - log-mappings - - network-mappings - - dtc-network-mappings - - observer-mappings - - dtc-observer-mappings - - orchestrator-mappings - - organization-mappings - - package-mappings - - process-mappings - - dtc-process-mappings - - registry-mappings - - related-mappings - - rule-mappings - - dtc-rule-mappings - - server-mappings - - service-mappings - - dtc-service-mappings - - source-mappings - - dtc-source-mappings - - pb-override-source-mappings - - syslog-mappings - - dtc-syslog-mappings - - threat-mappings - - tls-mappings - - tracing-mappings - - url-mappings - - user_agent-mappings - - dtc-user_agent-mappings - - vulnerability-mappings - - zeek-mappings - - common-settings - - common-dynamic-mappings - priority: 500 + number_of_shards: 2 + refresh_interval: 30s + sort: + field: '@timestamp' + order: desc policy: phases: - hot: - min_age: 0ms - actions: - set_priority: - priority: 100 - rollover: - max_age: 30d - 
max_primary_shard_size: 50gb cold: - min_age: 30d actions: set_priority: priority: 0 + min_age: 30d delete: - min_age: 365d actions: delete: {} - so-logs-auth0_x_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-auth0.logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-auth0.logs@package" - - "logs-auth0.logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-carbonblack_edr_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-carbonblack_edr.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-carbonblack_edr.log@package" - - "logs-carbonblack_edr.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_duo_x_admin: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_duo.admin-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cisco_duo.admin@package" - - "logs-cisco_duo.admin@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_duo_x_auth: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_duo.auth-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cisco_duo.auth@package" - - "logs-cisco_duo.auth@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_duo_x_offline_enrollment: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_duo.offline_enrollment-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cisco_duo.offline_enrollment@package" - - 
"logs-cisco_duo.offline_enrollment@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_duo_x_summary: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_duo.summary-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cisco_duo.summary@package" - - "logs-cisco_duo.summary@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_duo_x_telephony: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_duo.telephony-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cisco_duo.telephony@package" - - "logs-cisco_duo.telephony@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_meraki_x_events: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_meraki.events-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cisco_meraki.events@package" - - "logs-cisco_meraki.events@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_meraki_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_meraki.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-cisco_meraki.log@package" - - "logs-cisco_meraki.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-cisco_umbrella_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-cisco_umbrella.log-*" - template: - settings: - index: - 
number_of_replicas: 0 - composed_of: - - "logs-cisco_umbrella.log@package" - - "logs-cisco_umbrella.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-fireeye_x_nx: - index_sorting: False - index_template: - index_patterns: - - "logs-fireeye.nx-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-fireeye.nx@package" - - "logs-fireeye.nx@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-mimecast_x_audit_events: - index_sorting: False - index_template: - index_patterns: - - "logs-mimecast.audit_events-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-mimecast.audit_events@package" - - "logs-mimecast.audit_events@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-mimecast_x_dlp_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-mimecast.dlp_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-mimecast.dlp_logs@package" - - "logs-mimecast.dlp_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-mimecast_x_siem_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-mimecast.siem_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-mimecast.siem_logs@package" - - "logs-mimecast.siem_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-mimecast_x_threat_intel_malware_customer: - index_sorting: False - index_template: - index_patterns: - - 
"logs-mimecast.threat_intel_malware_customer-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-mimecast.threat_intel_malware_customer@package" - - "logs-mimecast.threat_intel_malware_customer@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-mimecast_x_threat_intel_malware_grid: - index_sorting: False - index_template: - index_patterns: - - "logs-mimecast.threat_intel_malware_grid-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-mimecast.threat_intel_malware_grid@package" - - "logs-mimecast.threat_intel_malware_grid@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-mimecast_x_ttp_ap_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-mimecast.ttp_ap_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-mimecast.ttp_ap_logs@package" - - "logs-mimecast.ttp_ap_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-mimecast_x_ttp_ip_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-mimecast.ttp_ip_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-mimecast.ttp_ip_logs@package" - - "logs-mimecast.ttp_ip_logs@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-mimecast_x_ttp_url_logs: - index_sorting: False - index_template: - index_patterns: - - "logs-mimecast.ttp_url_logs-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-mimecast.ttp_url_logs@package" - - "logs-mimecast.ttp_url_logs@custom" - - "so-fleet_globals-1" - - 
"so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-pulse_connect_secure_x_log: - index_sorting: False - index_template: - index_patterns: - - "logs-pulse_connect_secure.log-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-pulse_connect_secure.log@package" - - "logs-pulse_connect_secure.log@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-snyk_x_audit: - index_sorting: False - index_template: - index_patterns: - - "logs-snyk.audit-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-snyk.audit@package" - - "logs-snyk.audit@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-snyk_x_vulnerabilities: - index_sorting: False - index_template: - index_patterns: - - "logs-snyk.vulnerabilities-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-snyk.vulnerabilities@package" - - "logs-snyk.vulnerabilities@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sophos_x_utm: - index_sorting: False - index_template: - index_patterns: - - "logs-sophos.utm-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sophos.utm@package" - - "logs-sophos.utm@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sophos_x_xg: - index_sorting: False - index_template: - index_patterns: - - "logs-sophos.xg-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sophos.xg@package" - - "logs-sophos.xg@custom" - - "so-fleet_globals-1" - - 
"so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sophos_central_x_alert: - index_sorting: False - index_template: - index_patterns: - - "logs-sophos_central.alert-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sophos_central.alert@package" - - "logs-sophos_central.alert@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-sophos_central_x_event: - index_sorting: False - index_template: - index_patterns: - - "logs-sophos_central.event-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-sophos_central.event@package" - - "logs-sophos_central.event@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-tenable_sc_x_asset: - index_sorting: False - index_template: - index_patterns: - - "logs-tenable_sc.asset-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-tenable_sc.asset@package" - - "logs-tenable_sc.asset@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-tenable_sc_x_plugin: - index_sorting: False - index_template: - index_patterns: - - "logs-tenable_sc.plugin-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - "logs-tenable_sc.plugin@package" - - "logs-tenable_sc.plugin@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false - so-logs-tenable_sc_x_vulnerability: - index_sorting: False - index_template: - index_patterns: - - "logs-tenable_sc.vulnerability-*" - template: - settings: - index: - number_of_replicas: 0 - composed_of: - - 
"logs-tenable_sc.vulnerability@package" - - "logs-tenable_sc.vulnerability@custom" - - "so-fleet_globals-1" - - "so-fleet_agent_id_verification-1" - priority: 501 - data_stream: - hidden: false - allow_custom_routing: false + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + retention: + retention_pct: 50 so_roles: - so-manager: - config: - node: - roles: - - master - - data - - remote_cluster_client - - transform - so-managersearch: - config: - node: - roles: - - master - - data_hot - - remote_cluster_client - so-standalone: - config: - node: - roles: - - master - - data_hot - - remote_cluster_client - so-searchnode: - config: - node: - roles: - - data_hot - - ingest - so-heavynode: - config: - node: - roles: - - master - - data - - remote_cluster_client - - ingest so-eval: config: node: roles: [] + so-heavynode: + config: + node: + roles: + - master + - data + - remote_cluster_client + - ingest so-import: config: node: roles: [] + so-manager: + config: + node: + roles: + - master + - data + - remote_cluster_client + - transform + so-managersearch: + config: + node: + roles: + - master + - data_hot + - remote_cluster_client + so-searchnode: + config: + node: + roles: + - data_hot + - ingest + so-standalone: + config: + node: + roles: + - master + - data_hot + - remote_cluster_client From 4942f83d4f001328679b77849fce19b7adc2da7d Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 26 Oct 2023 11:45:39 -0400 Subject: [PATCH 285/417] adjust version to match target branch --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 247af78a9..8ea99f559 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.0-kilo \ No newline at end of file +2.4.30 From 2e0100fd35da3c97ad3dfd292d224cd8d0814d22 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 26 Oct 2023 12:37:55 -0400 
Subject: [PATCH 286/417] Update defaults.yaml --- salt/elasticsearch/defaults.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 9aef09876..721db8d99 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -9193,6 +9193,8 @@ elasticsearch: roles: - master - data_hot + - ingest + - transform - remote_cluster_client so-searchnode: config: @@ -9200,10 +9202,13 @@ elasticsearch: roles: - data_hot - ingest + - transform so-standalone: config: node: roles: - master - data_hot + - ingest + - transform - remote_cluster_client From 6891a95254b62c1a5093e6b0d465d91975d6bba0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 26 Oct 2023 13:02:39 -0400 Subject: [PATCH 287/417] remove wait_for_salt_minion from so-functions --- setup/so-functions | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index fd5bc790f..42402ad86 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -2494,20 +2494,6 @@ wait_for_file() { return 1 } -wait_for_salt_minion() { - retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$setup_log" 2>&1 || fail_setup - local attempt=0 - # each attempts would take about 15 seconds - local maxAttempts=20 - until check_salt_minion_status; do - attempt=$((attempt+1)) - if [[ $attempt -eq $maxAttempts ]]; then - fail_setup - fi - sleep 10 - done -} - verify_setup() { info "Verifying setup" set -o pipefail From 47373adad253e6960b16bc1537ed22b028c259c3 Mon Sep 17 00:00:00 2001 From: reyesj2 <94730068+reyesj2@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:15:40 -0400 Subject: [PATCH 288/417] Specify config.yaml in config_path. Otherwise when no influxd.bolt exists influxdb will fail to read the config file and won't create a new db. 
Signed-off-by: reyesj2 <94730068+reyesj2@users.noreply.github.com> --- salt/influxdb/enabled.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/influxdb/enabled.sls b/salt/influxdb/enabled.sls index c0733c12c..293a917cb 100644 --- a/salt/influxdb/enabled.sls +++ b/salt/influxdb/enabled.sls @@ -22,7 +22,7 @@ so-influxdb: - sobridge: - ipv4_address: {{ DOCKER.containers['so-influxdb'].ip }} - environment: - - INFLUXD_CONFIG_PATH=/conf + - INFLUXD_CONFIG_PATH=/conf/config.yaml - INFLUXDB_HTTP_LOG_ENABLED=false - DOCKER_INFLUXDB_INIT_MODE=setup - DOCKER_INFLUXDB_INIT_USERNAME=so From 7e8f3b753f27df1b0bad2925fe7509d9aeb09ee0 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 26 Oct 2023 13:19:04 -0400 Subject: [PATCH 289/417] add minion name to log, update comment --- salt/common/tools/sbin/so-common | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 87f40c9d4..bfa61f1b7 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -152,12 +152,12 @@ check_salt_master_status() { return 0 } -# this is only intended to be used to check the status of the minion +# this is only intended to be used to check the status of the minion from a salt master check_salt_minion_status() { local minion="$1" local timeout="${2:-5}" local logfile="${3:-'/dev/stdout'}" - echo "Checking if the salt minion will respond to jobs" >> "$logfile" 2>&1 + echo "Checking if the salt minion: $minion will respond to jobs" >> "$logfile" 2>&1 salt "$minion" test.ping -t $timeout > /dev/null 2>&1 local status=$? 
if [ $status -gt 0 ]; then From cb9d72ebd7edc382b678187e94d1a630101d0a5b Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 26 Oct 2023 14:19:59 -0400 Subject: [PATCH 290/417] switch back to kilo version --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 8ea99f559..7f2e97617 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.30 +2.4.0-kilo From b37e38e3c3f842de8345858948e2c5d6cc7cd2b2 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Thu, 26 Oct 2023 16:03:58 -0400 Subject: [PATCH 291/417] Update defaults.yaml --- salt/elasticsearch/defaults.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 721db8d99..44cb0ea7d 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -9173,6 +9173,7 @@ elasticsearch: roles: - master - data + - data_hot - remote_cluster_client - ingest so-import: @@ -9192,6 +9193,7 @@ elasticsearch: node: roles: - master + - data - data_hot - ingest - transform @@ -9200,6 +9202,7 @@ elasticsearch: config: node: roles: + - data - data_hot - ingest - transform @@ -9208,6 +9211,7 @@ elasticsearch: node: roles: - master + - data - data_hot - ingest - transform From cc3ee431923c89cd41f31a46318a2ff8527ca7a8 Mon Sep 17 00:00:00 2001 From: defensivedepth Date: Fri, 27 Oct 2023 07:49:34 -0400 Subject: [PATCH 292/417] Make dirs as needed --- salt/manager/kibana.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/manager/kibana.sls b/salt/manager/kibana.sls index f9aad3f05..17ac826c2 100644 --- a/salt/manager/kibana.sls +++ b/salt/manager/kibana.sls @@ -5,3 +5,4 @@ kibana_curl_config_distributed: - template: jinja - mode: 600 - show_changes: False + - makedirs: True \ No newline at end of file From 9fc3a730356b333bce63b33237ceb4fdf09c0256 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 27 Oct 2023 08:58:08 -0400 Subject: [PATCH 293/417] Annotation changes for warm node --- 
salt/elasticsearch/soc_elasticsearch.yaml | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index e3d257f11..189471226 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -75,8 +75,8 @@ elasticsearch: helpLink: elasticsearch.html phases: hot: - min_age: - description: Minimum age of index. This determines when the index should be moved to the hot tier. + max_age: + description: Maximum age of index. ex. 7d - This determines when the index should be moved out of the hot tier. global: True helpLink: elasticsearch.html actions: @@ -97,19 +97,29 @@ elasticsearch: helpLink: elasticsearch.html cold: min_age: - description: Minimum age of index. This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. global: True helpLink: elasticsearch.html actions: set_priority: priority: description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. + global: True + helpLink: elasticsearch.html + warm: + min_age: + description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. + regex: ^[0-9]d$ + actions: + set_priority: + priority: + description: Priority of index. This is used for recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. 
forcedType: int global: True helpLink: elasticsearch.html delete: min_age: - description: Minimum age of index. This determines when the index should be deleted. + description: Minimum age of index. ex. 90d - This determines when the index should be deleted. global: True helpLink: elasticsearch.html so-logs: &indexSettings From ce1858fe05eedb0ab614e82809cf2e0eccf6532b Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 27 Oct 2023 09:02:39 -0400 Subject: [PATCH 294/417] Annotation changes for warm node --- salt/elasticsearch/soc_elasticsearch.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 189471226..8bee839c1 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -109,7 +109,7 @@ elasticsearch: warm: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. - regex: ^[0-9]d$ + regex: ^\[0-9\]{1-5}d$ actions: set_priority: priority: From 87494f64c78cec81d5633f30b59421f511554100 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 27 Oct 2023 09:06:12 -0400 Subject: [PATCH 295/417] Annotation changes for warm node --- salt/elasticsearch/soc_elasticsearch.yaml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 8bee839c1..5b4d63f40 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -109,7 +109,8 @@ elasticsearch: warm: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. 
- regex: ^\[0-9\]{1-5}d$ + regex: ^\[0-9\]{1,5}d$ + global: True actions: set_priority: priority: From 25f1a0251f423b801fe084d83f8085c7fe787b12 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 27 Oct 2023 09:08:07 -0400 Subject: [PATCH 296/417] Annotation changes for warm node --- salt/elasticsearch/soc_elasticsearch.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 5b4d63f40..a5170b776 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -110,6 +110,7 @@ elasticsearch: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. regex: ^\[0-9\]{1,5}d$ + forcedType: string global: True actions: set_priority: From 4bbcc5002ad46d9cfe63071f9f5fcec756e4e9e3 Mon Sep 17 00:00:00 2001 From: Josh Patterson Date: Wed, 25 Oct 2023 11:01:13 -0400 Subject: [PATCH 297/417] Revert "Revert "Upgrade/salt3006.3"" This reverts commit c41e19ad0b301f1deae35365e62544e987045fdd. 
--- salt/common/tools/sbin/so-common | 75 +- salt/manager/tools/sbin/soup | 62 +- salt/salt/map.jinja | 2 +- salt/salt/master.defaults.yaml | 2 +- salt/salt/minion.defaults.yaml | 2 +- salt/salt/scripts/bootstrap-salt.sh | 2106 ++++++++++++++++++++++++--- setup/so-functions | 5 +- 7 files changed, 2001 insertions(+), 253 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index fc14e9d0a..87f40c9d4 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -152,15 +152,18 @@ check_salt_master_status() { return 0 } +# this is only intended to be used to check the status of the minion check_salt_minion_status() { - local timeout="${1:-5}" - echo "Checking if the salt minion will respond to jobs" >> "$setup_log" 2>&1 - salt "$MINION_ID" test.ping -t $timeout > /dev/null 2>&1 + local minion="$1" + local timeout="${2:-5}" + local logfile="${3:-'/dev/stdout'}" + echo "Checking if the salt minion will respond to jobs" >> "$logfile" 2>&1 + salt "$minion" test.ping -t $timeout > /dev/null 2>&1 local status=$? 
if [ $status -gt 0 ]; then - echo " Minion did not respond" >> "$setup_log" 2>&1 + echo " Minion did not respond" >> "$logfile" 2>&1 else - echo " Received job response from salt minion" >> "$setup_log" 2>&1 + echo " Received job response from salt minion" >> "$logfile" 2>&1 fi return $status @@ -440,6 +443,24 @@ run_check_net_err() { fi } +wait_for_salt_minion() { + local minion="$1" + local timeout="${2:-5}" + local logfile="${3:-'/dev/stdout'}" + retry 60 5 "journalctl -u salt-minion.service | grep 'Minion is ready to receive requests'" >> "$logfile" 2>&1 || fail + local attempt=0 + # each attempts would take about 15 seconds + local maxAttempts=20 + until check_salt_minion_status "$minion" "$timeout" "$logfile"; do + attempt=$((attempt+1)) + if [[ $attempt -eq $maxAttempts ]]; then + return 1 + fi + sleep 10 + done + return 0 +} + salt_minion_count() { local MINIONDIR="/opt/so/saltstack/local/pillar/minions" MINIONCOUNT=$(ls -la $MINIONDIR/*.sls | grep -v adv_ | wc -l) @@ -452,19 +473,51 @@ set_os() { OS=rocky OSVER=9 is_rocky=true + is_rpm=true elif grep -q "CentOS Stream release 9" /etc/redhat-release; then OS=centos OSVER=9 is_centos=true - elif grep -q "Oracle Linux Server release 9" /etc/system-release; then - OS=oel + is_rpm=true + elif grep -q "AlmaLinux release 9" /etc/redhat-release; then + OS=alma OSVER=9 - is_oracle=true + is_alma=true + is_rpm=true + elif grep -q "Red Hat Enterprise Linux release 9" /etc/redhat-release; then + if [ -f /etc/oracle-release ]; then + OS=oracle + OSVER=9 + is_oracle=true + is_rpm=true + else + OS=rhel + OSVER=9 + is_rhel=true + is_rpm=true + fi fi cron_service_name="crond" - else - OS=ubuntu - is_ubuntu=true + elif [ -f /etc/os-release ]; then + if grep -q "UBUNTU_CODENAME=focal" /etc/os-release; then + OSVER=focal + UBVER=20.04 + OS=ubuntu + is_ubuntu=true + is_deb=true + elif grep -q "UBUNTU_CODENAME=jammy" /etc/os-release; then + OSVER=jammy + UBVER=22.04 + OS=ubuntu + is_ubuntu=true + is_deb=true + elif grep -q 
"VERSION_CODENAME=bookworm" /etc/os-release; then + OSVER=bookworm + DEBVER=12 + is_debian=true + OS=debian + is_deb=true + fi cron_service_name="cron" fi } diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 0666e25ae..f30c3f15d 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -467,7 +467,6 @@ stop_salt_master() { echo "" echo "Killing any queued Salt jobs on the manager." pkill -9 -ef "/usr/bin/python3 /bin/salt" >> $SOUP_LOG 2>&1 - set -e echo "" echo "Storing salt-master pid." @@ -475,6 +474,7 @@ stop_salt_master() { echo "Found salt-master PID $MASTERPID" systemctl_func "stop" "salt-master" timeout 30 tail --pid=$MASTERPID -f /dev/null || echo "salt-master still running at $(date +"%T.%6N") after waiting 30s. We cannot kill due to systemd restart option." + set -e } stop_salt_minion() { @@ -487,14 +487,12 @@ stop_salt_minion() { echo "" echo "Killing Salt jobs on this node." salt-call saltutil.kill_all_jobs --local - set -e echo "Storing salt-minion pid." MINIONPID=$(pgrep -f '/opt/saltstack/salt/bin/python3.10 /usr/bin/salt-minion' | head -1) echo "Found salt-minion PID $MINIONPID" systemctl_func "stop" "salt-minion" - set +e timeout 30 tail --pid=$MINIONPID -f /dev/null || echo "Killing salt-minion at $(date +"%T.%6N") after waiting 30s" && pkill -9 -ef /usr/bin/salt-minion set -e } @@ -633,6 +631,7 @@ upgrade_check_salt() { if [ "$INSTALLEDSALTVERSION" == "$NEWSALTVERSION" ]; then echo "You are already running the correct version of Salt for Security Onion." else + echo "Salt needs to be upgraded to $NEWSALTVERSION." UPGRADESALT=1 fi } @@ -641,22 +640,48 @@ upgrade_salt() { SALTUPGRADED=True echo "Performing upgrade of Salt from $INSTALLEDSALTVERSION to $NEWSALTVERSION." echo "" - # If CentOS - if [[ $OS == 'centos' ]]; then + # If rhel family + if [[ $is_rpm ]]; then echo "Removing yum versionlock for Salt." echo "" yum versionlock delete "salt-*" echo "Updating Salt packages." 
echo "" set +e - run_check_net_err \ - "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ - "Could not update salt, please check $SOUP_LOG for details." + # if oracle run with -r to ignore repos set by bootstrap + if [[ $OS == 'oracle' ]]; then + run_check_net_err \ + "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -r -F -M -x python3 stable \"$NEWSALTVERSION\"" \ + "Could not update salt, please check $SOUP_LOG for details." + # if another rhel family variant we want to run without -r to allow the bootstrap script to manage repos + else + run_check_net_err \ + "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \ + "Could not update salt, please check $SOUP_LOG for details." + fi set -e echo "Applying yum versionlock for Salt." echo "" yum versionlock add "salt-*" # Else do Ubuntu things + elif [[ $is_deb ]]; then + echo "Removing apt hold for Salt." + echo "" + apt-mark unhold "salt-common" + apt-mark unhold "salt-master" + apt-mark unhold "salt-minion" + echo "Updating Salt packages." + echo "" + set +e + run_check_net_err \ + "sh $UPDATE_DIR/salt/salt/scripts/bootstrap-salt.sh -X -F -M -x python3 stable \"$NEWSALTVERSION\"" \ + "Could not update salt, please check $SOUP_LOG for details." + set -e + echo "Applying apt hold for Salt." + echo "" + apt-mark hold "salt-common" + apt-mark hold "salt-master" + apt-mark hold "salt-minion" fi echo "Checking if Salt was upgraded." @@ -668,7 +693,7 @@ upgrade_salt() { echo "Once the issue is resolved, run soup again." echo "Exiting." echo "" - exit 0 + exit 1 else echo "Salt upgrade success." 
echo "" @@ -798,7 +823,7 @@ main() { if [[ $is_airgap -eq 0 ]]; then yum clean all check_os_updates - elif [[ $OS == 'oel' ]]; then + elif [[ $OS == 'oracle' ]]; then # sync remote repo down to local if not airgap repo_sync check_os_updates @@ -815,7 +840,8 @@ main() { echo "Hotfix applied" update_version enable_highstate - salt-call state.highstate -l info queue=True + (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" + highstate else echo "" echo "Performing upgrade from Security Onion $INSTALLEDVERSION to Security Onion $NEWVERSION." @@ -851,6 +877,14 @@ main() { echo "Upgrading Salt" # Update the repo files so it can actually upgrade upgrade_salt + + # for Debian based distro, we need to stop salt again after upgrade output below is from bootstrap-salt + # * WARN: Not starting daemons on Debian based distributions + # is not working mostly because starting them is the default behaviour. + if [[ $is_deb ]]; then + stop_salt_minion + stop_salt_master + fi fi preupgrade_changes @@ -913,7 +947,8 @@ main() { echo "" echo "Running a highstate. This could take several minutes." set +e - salt-call state.highstate -l info queue=True + (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" + highstate set -e stop_salt_master @@ -928,7 +963,8 @@ main() { set -e echo "Running a highstate to complete the Security Onion upgrade on this manager. This could take several minutes." 
- salt-call state.highstate -l info queue=True + (wait_for_salt_minion "$MINIONID" "5" '/dev/stdout' || fail "Salt minion was not running or ready.") 2>&1 | tee -a "$SOUP_LOG" + highstate postupgrade_changes [[ $is_airgap -eq 0 ]] && unmount_update diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 1120685fb..131ff46ca 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -23,7 +23,7 @@ {% if grains.os|lower in ['Rocky', 'redhat', 'CentOS Stream'] %} {% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} {% elif grains.os_family|lower == 'debian' %} - {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} + {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %} {% endif %} {% else %} {% set UPGRADECOMMAND = 'echo Already running Salt Minion version ' ~ SALTVERSION %} diff --git a/salt/salt/master.defaults.yaml b/salt/salt/master.defaults.yaml index 126039802..40b6f5268 100644 --- a/salt/salt/master.defaults.yaml +++ b/salt/salt/master.defaults.yaml @@ -2,4 +2,4 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: master: - version: 3006.1 + version: 3006.3 diff --git a/salt/salt/minion.defaults.yaml b/salt/salt/minion.defaults.yaml index 7e1540d17..71fd18f96 100644 --- a/salt/salt/minion.defaults.yaml +++ b/salt/salt/minion.defaults.yaml @@ -2,6 +2,6 @@ # When updating the salt version, also update the version in securityonion-builds/images/iso-task/Dockerfile and saltify function in so-functions salt: minion: - version: 3006.1 + version: 3006.3 check_threshold: 3600 # in seconds, threshold used for so-salt-minion-check. 
any value less than 600 seconds may cause a lot of salt-minion restarts since the job to touch the file occurs every 5-8 minutes by default service_start_delay: 30 # in seconds. diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index 47d25949c..a016524e6 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -14,7 +14,7 @@ # # BUGS: https://github.com/saltstack/salt-bootstrap/issues # -# COPYRIGHT: (c) 2012-2021 by the SaltStack Team, see AUTHORS.rst for more +# COPYRIGHT: (c) 2012-2022 by the SaltStack Team, see AUTHORS.rst for more # details. # # LICENSE: Apache 2.0 @@ -23,7 +23,7 @@ #====================================================================================================================== set -o nounset # Treat unset variables as an error -__ScriptVersion="2021.09.17" +__ScriptVersion="2023.08.03" __ScriptName="bootstrap-salt.sh" __ScriptFullName="$0" @@ -224,7 +224,6 @@ _KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE} _TEMP_CONFIG_DIR="null" _SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git" _SALT_REPO_URL=${_SALTSTACK_REPO_URL} -_DOWNSTREAM_PKG_REPO=$BS_FALSE _TEMP_KEYS_DIR="null" _SLEEP="${__DEFAULT_SLEEP}" _INSTALL_MASTER=$BS_FALSE @@ -268,6 +267,8 @@ _CUSTOM_MASTER_CONFIG="null" _CUSTOM_MINION_CONFIG="null" _QUIET_GIT_INSTALLATION=$BS_FALSE _REPO_URL="repo.saltproject.io" +_ONEDIR_DIR="salt" +_ONEDIR_NIGHTLY_DIR="salt-dev/${_ONEDIR_DIR}" _PY_EXE="python3" _INSTALL_PY="$BS_FALSE" _TORNADO_MAX_PY3_VERSION="5.0" @@ -275,6 +276,9 @@ _POST_NEON_INSTALL=$BS_FALSE _MINIMUM_PIP_VERSION="9.0.1" _MINIMUM_SETUPTOOLS_VERSION="9.1" _POST_NEON_PIP_INSTALL_ARGS="--prefix=/usr" +_PIP_DOWNLOAD_ARGS="" +_QUICK_START="$BS_FALSE" +_AUTO_ACCEPT_MINION_KEYS="$BS_FALSE" # Defaults for install arguments ITYPE="stable" @@ -290,110 +294,130 @@ __usage() { Usage : ${__ScriptName} [options] [install-type-args] Installation types: - - stable Install latest stable release. 
This is the default - install type - - stable [branch] Install latest version on a branch. Only supported - for packages available at repo.saltproject.io - - stable [version] Install a specific version. Only supported for - packages available at repo.saltproject.io - To pin a 3xxx minor version, specify it as 3xxx.0 - - testing RHEL-family specific: configure EPEL testing repo - - git Install from the head of the master branch - - git [ref] Install from any git ref (such as a branch, tag, or - commit) + - stable Install latest stable release. This is the default + install type + - stable [branch] Install latest version on a branch. Only supported + for packages available at repo.saltproject.io + - stable [version] Install a specific version. Only supported for + packages available at repo.saltproject.io + To pin a 3xxx minor version, specify it as 3xxx.0 + - testing RHEL-family specific: configure EPEL testing repo + - git Install from the head of the master branch + - git [ref] Install from any git ref (such as a branch, tag, or + commit) + - onedir Install latest onedir release. + - onedir [version] Install a specific version. Only supported for + onedir packages available at repo.saltproject.io + + - onedir_rc Install latest onedir RC release. + - onedir_rc [version] Install a specific version. Only supported for + onedir RC packages available at repo.saltproject.io + - old-stable Install latest old stable release. + - old-stable [branch] Install latest version on a branch. Only supported + for packages available at repo.saltproject.io + - old-stable [version] Install a specific version. 
Only supported for + packages available at repo.saltproject.io + To pin a 3xxx minor version, specify it as 3xxx.0 Examples: - ${__ScriptName} - ${__ScriptName} stable - - ${__ScriptName} stable 2017.7 - - ${__ScriptName} stable 2017.7.2 + - ${__ScriptName} stable 3006 + - ${__ScriptName} stable 3006.1 - ${__ScriptName} testing - ${__ScriptName} git - ${__ScriptName} git 2017.7 - ${__ScriptName} git v2017.7.2 - ${__ScriptName} git 06f249901a2e2f1ed310d58ea3921a129f214358 + - ${__ScriptName} onedir + - ${__ScriptName} onedir 3006 + - ${__ScriptName} onedir_rc + - ${__ScriptName} onedir_rc 3006 + - ${__ScriptName} old-stable + - ${__ScriptName} old-stable 3005 + - ${__ScriptName} old-stable 3005.1 + Options: - -h Display this message - -v Display script version - -n No colours - -D Show debug output + -a Pip install all Python pkg dependencies for Salt. Requires -V to install + all pip pkgs into the virtualenv. + (Only available for Ubuntu based distributions) + -A Pass the salt-master DNS name or IP. This will be stored under + \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf + -b Assume that dependencies are already installed and software sources are + set up. If git is selected, git tree is still checked out as dependency + step. -c Temporary configuration directory - -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} - -w Install packages from downstream package repository rather than - upstream, saltstack package repository. This is currently only - implemented for SUSE. - -k Temporary directory holding the minion keys which will pre-seed - the master. - -s Sleep time used when waiting for daemons to start, restart and when - checking for the services running. 
Default: ${__DEFAULT_SLEEP} - -L Also install salt-cloud and required python-libcloud package - -M Also install salt-master - -S Also install salt-syndic - -N Do not install salt-minion - -X Do not start daemons after installation - -d Disables checking if Salt services are enabled to start on system boot. - You can also do this by touching /tmp/disable_salt_checks on the target - host. Default: \${BS_FALSE} - -P Allow pip based installations. On some distributions the required salt - packages or its dependencies are not available as a package for that - distribution. Using this flag allows the script to use pip as a last - resort method. NOTE: This only works for functions which actually - implement pip based installations. - -U If set, fully upgrade the system prior to bootstrapping Salt - -I If set, allow insecure connections while downloading any files. For - example, pass '--no-check-certificate' to 'wget' or '--insecure' to - 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining - GnuPG archive keys insecurely if distro has changed release signatures. - -F Allow copied files to overwrite existing (config, init.d, etc) - -K If set, keep the temporary files in the temporary directories specified - with -c and -k -C Only run the configuration function. Implies -F (forced overwrite). To overwrite Master or Syndic configs, -M or -S, respectively, must also be specified. Salt installation will be ommitted, but some of the dependencies could be installed to write configuration with -j or -J. - -A Pass the salt-master DNS name or IP. This will be stored under - \${BS_SALT_ETC_DIR}/minion.d/99-master-address.conf - -i Pass the salt-minion id. This will be stored under - \${BS_SALT_ETC_DIR}/minion_id - -p Extra-package to install while installing Salt dependencies. One package - per -p flag. You are responsible for providing the proper package name. - -H Use the specified HTTP proxy for all download URLs (including https://). 
- For example: http://myproxy.example.com:3128 - -b Assume that dependencies are already installed and software sources are - set up. If git is selected, git tree is still checked out as dependency - step. + -d Disables checking if Salt services are enabled to start on system boot. + You can also do this by touching /tmp/disable_salt_checks on the target + host. Default: \${BS_FALSE} + -D Show debug output -f Force shallow cloning for git installations. This may result in an "n/a" in the version number. - -l Disable ssl checks. When passed, switches "https" calls to "http" where - possible. - -V Install Salt into virtualenv - (only available for Ubuntu based distributions) - -a Pip install all Python pkg dependencies for Salt. Requires -V to install - all pip pkgs into the virtualenv. - (Only available for Ubuntu based distributions) - -r Disable all repository configuration performed by this script. This - option assumes all necessary repository configuration is already present - on the system. - -R Specify a custom repository URL. Assumes the custom repository URL - points to a repository that mirrors Salt packages located at - repo.saltproject.io. The option passed with -R replaces the - "repo.saltproject.io". If -R is passed, -r is also set. Currently only - works on CentOS/RHEL and Debian based distributions. - -J Replace the Master config file with data passed in as a JSON string. If - a Master config file is found, a reasonable effort will be made to save - the file with a ".bak" extension. If used in conjunction with -C or -F, - no ".bak" file will be created as either of those options will force - a complete overwrite of the file. + -F Allow copied files to overwrite existing (config, init.d, etc) + -g Salt Git repository URL. Default: ${_SALTSTACK_REPO_URL} + -h Display this message + -H Use the specified HTTP proxy for all download URLs (including https://). + For example: http://myproxy.example.com:3128 + -i Pass the salt-minion id. 
This will be stored under + \${BS_SALT_ETC_DIR}/minion_id + -I If set, allow insecure connections while downloading any files. For + example, pass '--no-check-certificate' to 'wget' or '--insecure' to + 'curl'. On Debian and Ubuntu, using this option with -U allows obtaining + GnuPG archive keys insecurely if distro has changed release signatures. -j Replace the Minion config file with data passed in as a JSON string. If a Minion config file is found, a reasonable effort will be made to save the file with a ".bak" extension. If used in conjunction with -C or -F, no ".bak" file will be created as either of those options will force a complete overwrite of the file. + -J Replace the Master config file with data passed in as a JSON string. If + a Master config file is found, a reasonable effort will be made to save + the file with a ".bak" extension. If used in conjunction with -C or -F, + no ".bak" file will be created as either of those options will force + a complete overwrite of the file. + -k Temporary directory holding the minion keys which will pre-seed + the master. + -K If set, keep the temporary files in the temporary directories specified + with -c and -k + -l Disable ssl checks. When passed, switches "https" calls to "http" where + possible. + -L Also install salt-cloud and required python-libcloud package + -M Also install salt-master + -n No colours + -N Do not install salt-minion + -p Extra-package to install while installing Salt dependencies. One package + per -p flag. You are responsible for providing the proper package name. + -P Allow pip based installations. On some distributions the required salt + packages or its dependencies are not available as a package for that + distribution. Using this flag allows the script to use pip as a last + resort method. NOTE: This only works for functions which actually + implement pip based installations. 
-q Quiet salt installation from git (setup.py install -q) + -Q Quickstart, install the Salt master and the Salt minion. + And automatically accept the minion key. + -R Specify a custom repository URL. Assumes the custom repository URL + points to a repository that mirrors Salt packages located at + repo.saltproject.io. The option passed with -R replaces the + "repo.saltproject.io". If -R is passed, -r is also set. Currently only + works on CentOS/RHEL and Debian based distributions and macOS. + -s Sleep time used when waiting for daemons to start, restart and when + checking for the services running. Default: ${__DEFAULT_SLEEP} + -S Also install salt-syndic + -r Disable all repository configuration performed by this script. This + option assumes all necessary repository configuration is already present + on the system. + -U If set, fully upgrade the system prior to bootstrapping Salt + -v Display script version + -V Install Salt into virtualenv + (only available for Ubuntu based distributions) -x Changes the Python version used to install Salt. For CentOS 6 git installations python2.7 is supported. - Fedora git installation, CentOS 7, Debian 9, Ubuntu 16.04 and 18.04 support python3. + Fedora git installation, CentOS 7, Ubuntu 18.04 support python3. + -X Do not start daemons after installation -y Installs a different python version on host. Currently this has only been tested with CentOS 6 and is considered experimental. This will install the ius repo on the box if disable repo is false. This must be used in conjunction @@ -406,7 +430,7 @@ EOT } # ---------- end of function __usage ---------- -while getopts ':hvnDc:g:Gyx:wk:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aq' opt +while getopts ':hvnDc:g:Gyx:k:s:MSNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aqQ' opt do case "${opt}" in @@ -422,7 +446,6 @@ do echowarn "No need to provide this option anymore, now it is a default behavior." 
;; - w ) _DOWNSTREAM_PKG_REPO=$BS_TRUE ;; k ) _TEMP_KEYS_DIR="$OPTARG" ;; s ) _SLEEP=$OPTARG ;; M ) _INSTALL_MASTER=$BS_TRUE ;; @@ -451,6 +474,7 @@ do J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;; j ) _CUSTOM_MINION_CONFIG=$OPTARG ;; q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;; + Q ) _QUICK_START=$BS_TRUE ;; x ) _PY_EXE="$OPTARG" ;; y ) _INSTALL_PY="$BS_TRUE" ;; @@ -572,7 +596,7 @@ fi echoinfo "Running version: ${__ScriptVersion}" echoinfo "Executed by: ${CALLER}" echoinfo "Command line: '${__ScriptFullName} ${__ScriptArgs}'" -#echowarn "Running the unstable version of ${__ScriptName}" +echowarn "Running the unstable version of ${__ScriptName}" # Define installation type if [ "$#" -gt 0 ];then @@ -582,11 +606,17 @@ if [ "$#" -gt 0 ];then fi # Check installation type -if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git)')" = "" ]; then +if [ "$(echo "$ITYPE" | grep -E '(stable|testing|git|onedir|onedir_rc|old-stable)')" = "" ]; then echoerror "Installation type \"$ITYPE\" is not known..." exit 1 fi +# Due to our modifications to install_centos_onedir it is easiest to just lock down to only allowing stable install +if [ "$(echo "$ITYPE" | grep stable)" = "" ]; then + echoerror "This script has been modified to only support stable installation type. Installation type \"$ITYPE\" is not allowed..." 
+ exit 1 +fi + # If doing a git install, check what branch/tag/sha will be checked out if [ "$ITYPE" = "git" ]; then if [ "$#" -eq 0 ];then @@ -602,23 +632,123 @@ if [ "$ITYPE" = "git" ]; then # If doing stable install, check if version specified elif [ "$ITYPE" = "stable" ]; then if [ "$#" -eq 0 ];then - STABLE_REV="latest" + ONEDIR_REV="latest" + _ONEDIR_REV="latest" + ITYPE="onedir" else - if [ "$(echo "$1" | grep -E '^(latest|1\.6|1\.7|2014\.1|2014\.7|2015\.5|2015\.8|2016\.3|2016\.11|2017\.7|2018\.3|2019\.2|3000|3001|3002|3003|3004)$')" != "" ]; then - STABLE_REV="$1" + if [ "$(echo "$1" | grep -E '^(nightly|latest|3005|3006)$')" != "" ]; then + ONEDIR_REV="$1" + _ONEDIR_REV="$1" + ITYPE="onedir" shift - elif [ "$(echo "$1" | grep -E '^(2[0-9]*\.[0-9]*\.[0-9]*|[3-9][0-9]{3}(\.[0-9]*)?)$')" != "" ]; then + elif [ "$(echo "$1" | grep -E '^([3-9][0-5]{2}[5-9](\.[0-9]*)?)')" != "" ]; then + ONEDIR_REV="minor/$1" + _ONEDIR_REV="$1" + ITYPE="onedir" + shift + else + echo "Unknown stable version: $1 (valid: 3005, 3006, latest)" + exit 1 + fi + fi + +# If doing old-stable install, check if version specified +elif [ "$ITYPE" = "old-stable" ]; then + if [ "$#" -eq 0 ];then + ITYPE="stable" + else + if [ "$(echo "$1" | grep -E '^(3003|3004|3005)$')" != "" ]; then + STABLE_REV="$1" + ITYPE="stable" + shift + elif [ "$(echo "$1" | grep -E '^([3-9][0-5]{3}(\.[0-9]*)?)$')" != "" ]; then # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix + ITYPE="stable" STABLE_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') if [ "$(uname)" != "Darwin" ]; then STABLE_REV="archive/$STABLE_REV" fi shift else - echo "Unknown stable version: $1 (valid: 1.6, 1.7, 2014.1, 2014.7, 2015.5, 2015.8, 2016.3, 2016.11, 2017.7, 2018.3, 2019.2, 3000, 3001, 3002, 3003, 3004, latest, \$MAJOR.\$MINOR.\$PATCH until 2019.2, \$MAJOR or \$MAJOR.\$PATCH starting from 3000)" + echo "Unknown old stable version: $1 (valid: 3003, 3004, 3005)" exit 1 fi fi + +elif [ "$ITYPE" 
= "onedir" ]; then + if [ "$#" -eq 0 ];then + ONEDIR_REV="latest" + else + if [ "$(echo "$1" | grep -E '^(nightly|latest|3005|3006)$')" != "" ]; then + ONEDIR_REV="$1" + shift + elif [ "$(echo "$1" | grep -E '^(3005(\.[0-9]*)?)')" != "" ]; then + # Handle the 3005.0 version as 3005 archive (pin to minor) and strip the fake ".0" suffix + ONEDIR_REV=$(echo "$1" | sed -E 's/^(3005)\.0$/\1/') + ONEDIR_REV="minor/$ONEDIR_REV" + shift + elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}(\.[0-9]*)?)')" != "" ]; then + ONEDIR_REV="minor/$1" + shift + else + echo "Unknown onedir version: $1 (valid: 3005, 3006, latest, nightly.)" + exit 1 + fi + fi + +elif [ "$ITYPE" = "onedir_rc" ]; then + # Change the _ONEDIR_DIR to be the location for the RC packages + _ONEDIR_DIR="salt_rc/salt" + + # Change ITYPE to onedir so we use the regular onedir functions + ITYPE="onedir" + + if [ "$#" -eq 0 ];then + ONEDIR_REV="latest" + else + if [ "$(echo "$1" | grep -E '^(latest)$')" != "" ]; then + ONEDIR_REV="$1" + shift + elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}?rc[0-9]-[0-9]$)')" != "" ]; then + # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix + #ONEDIR_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') + ONEDIR_REV="minor/$1" + shift + elif [ "$(echo "$1" | grep -E '^([3-9][0-9]{3}\.[0-9]?rc[0-9]$)')" != "" ]; then + # Handle the 3xxx.0 version as 3xxx archive (pin to minor) and strip the fake ".0" suffix + #ONEDIR_REV=$(echo "$1" | sed -E 's/^([3-9][0-9]{3})\.0$/\1/') + ONEDIR_REV="minor/$1" + shift + else + echo "Unknown onedir_rc version: $1 (valid: 3005-1, latest.)" + exit 1 + fi + fi +fi + +# Doing a quick start, so install master +# set master address to 127.0.0.1 +if [ "$_QUICK_START" -eq "$BS_TRUE" ]; then + # make install type is stable + ITYPE="stable" + + # make sure the revision is latest + STABLE_REV="latest" + ONEDIR_REV="latest" + + # make sure we're installing the master + _INSTALL_MASTER=$BS_TRUE + + # override incase install 
minion + # is set to false + _INSTALL_MINION=$BS_TRUE + + # Set master address to loopback IP + _SALT_MASTER_ADDRESS="127.0.0.1" + + # Auto accept the minion key + # when the install is done. + _AUTO_ACCEPT_MINION_KEYS=$BS_TRUE fi # Check for any unparsed arguments. Should be an error. @@ -824,6 +954,18 @@ __fetch_verify() { return 1 } +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __check_url_exists +# DESCRIPTION: Checks if a URL exists +#---------------------------------------------------------------------------------------------------------------------- +__check_url_exists() { + _URL="$1" + if curl --output /dev/null --silent --fail "${_URL}"; then + return 0 + else + return 1 + fi +} #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __gather_hardware_info # DESCRIPTION: Discover hardware information @@ -945,7 +1087,7 @@ __strip_duplicates() { __sort_release_files() { KNOWN_RELEASE_FILES=$(echo "(arch|alpine|centos|debian|ubuntu|fedora|redhat|suse|\ mandrake|mandriva|gentoo|slackware|turbolinux|unitedlinux|void|lsb|system|\ - oracle|os)(-|_)(release|version)" | sed -E 's:[[:space:]]::g') + oracle|os|almalinux|rocky)(-|_)(release|version)" | sed -E 's:[[:space:]]::g') primary_release_files="" secondary_release_files="" # Sort know VS un-known files first @@ -959,7 +1101,7 @@ __sort_release_files() { done # Now let's sort by know files importance, max important goes last in the max_prio list - max_prio="redhat-release centos-release oracle-release fedora-release" + max_prio="redhat-release centos-release oracle-release fedora-release almalinux-release rocky-release" for entry in $max_prio; do if [ "$(echo "${primary_release_files}" | grep "$entry")" != "" ]; then primary_release_files=$(echo "${primary_release_files}" | sed -e "s:\\(.*\\)\\($entry\\)\\(.*\\):\\2 \\1 \\3:g") @@ -1028,6 +1170,8 @@ 
__gather_linux_system_info() { elif [ "${DISTRO_NAME}" = "Arch" ]; then DISTRO_NAME="Arch Linux" return + elif [ "${DISTRO_NAME}" = "Rocky" ]; then + DISTRO_NAME="Rocky Linux" fi rv=$(lsb_release -sr) [ "${rv}" != "" ] && DISTRO_VERSION=$(__parse_version_string "$rv") @@ -1086,6 +1230,8 @@ __gather_linux_system_info() { unitedlinux ) n="UnitedLinux" ;; void ) n="VoidLinux" ;; oracle ) n="Oracle Linux" ;; + almalinux ) n="AlmaLinux" ;; + rocky ) n="Rocky Linux" ;; system ) while read -r line; do [ "${n}x" != "systemx" ] && break @@ -1308,7 +1454,7 @@ __gather_system_info() { #---------------------------------------------------------------------------------------------------------------------- # shellcheck disable=SC2034 __ubuntu_derivatives_translation() { - UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon)" + UBUNTU_DERIVATIVES="(trisquel|linuxmint|linaro|elementary_os|neon|pop)" # Mappings trisquel_6_ubuntu_base="12.04" linuxmint_13_ubuntu_base="12.04" @@ -1321,6 +1467,8 @@ __ubuntu_derivatives_translation() { neon_16_ubuntu_base="16.04" neon_18_ubuntu_base="18.04" neon_20_ubuntu_base="20.04" + neon_22_ubuntu_base="22.04" + pop_22_ubuntu_base="22.04" # Translate Ubuntu derivatives to their base Ubuntu version match=$(echo "$DISTRO_NAME_L" | grep -E ${UBUNTU_DERIVATIVES}) @@ -1380,9 +1528,13 @@ __check_dpkg_architecture() { if [ "$_CUSTOM_REPO_URL" != "null" ]; then warn_msg="Support for arm64 is experimental, make sure the custom repository used has the expected structure and contents." 
else - # Saltstack official repository does not yet have arm64 metadata, - # use amd64 repositories on arm64, since all pkgs are arch-independent - __REPO_ARCH="amd64" + # Saltstack official repository has arm64 metadata beginning with Debian 11, + # use amd64 repositories on arm64 for anything older, since all pkgs are arch-independent + if [ "$DISTRO_NAME_L" = "debian" ] && [ "$DISTRO_MAJOR_VERSION" -lt 11 ]; then + __REPO_ARCH="amd64" + else + __REPO_ARCH="arm64" + fi __REPO_ARCH_DEB="deb [signed-by=/usr/share/keyrings/salt-archive-keyring.gpg arch=$__REPO_ARCH]" warn_msg="Support for arm64 packages is experimental and might rely on architecture-independent packages from the amd64 repository." fi @@ -1462,6 +1614,9 @@ __ubuntu_codename_translation() { "21") DISTRO_CODENAME="hirsute" ;; + "22") + DISTRO_CODENAME="jammy" + ;; *) DISTRO_CODENAME="trusty" ;; @@ -1488,10 +1643,12 @@ __debian_derivatives_translation() { devuan_1_debian_base="8.0" devuan_2_debian_base="9.0" kali_1_debian_base="7.0" + kali_2021_debian_base="10.0" linuxmint_1_debian_base="8.0" raspbian_8_debian_base="8.0" raspbian_9_debian_base="9.0" raspbian_10_debian_base="10.0" + raspbian_11_debian_base="11.0" bunsenlabs_9_debian_base="9.0" turnkey_9_debian_base="9.0" @@ -1559,6 +1716,14 @@ __debian_codename_translation() { "11") DISTRO_CODENAME="bullseye" ;; + "12") + DISTRO_CODENAME="bookworm" + # FIXME - TEMPORARY + # use bullseye packages until bookworm packages are available + DISTRO_CODENAME="bullseye" + DISTRO_MAJOR_VERSION=11 + rv=11 + ;; *) DISTRO_CODENAME="stretch" ;; @@ -1590,11 +1755,13 @@ __check_end_of_life_versions() { # = 17.04, 17.10 # = 18.10 # = 19.04, 19.10 + # = 20.10 if [ "$DISTRO_MAJOR_VERSION" -lt 16 ] || \ [ "$DISTRO_MAJOR_VERSION" -eq 17 ] || \ [ "$DISTRO_MAJOR_VERSION" -eq 19 ] || \ { [ "$DISTRO_MAJOR_VERSION" -eq 16 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then + { [ 
"$DISTRO_MAJOR_VERSION" -eq 18 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ + { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; }; then echoerror "End of life distributions are not supported." echoerror "Please consider upgrading to the next stable. See:" echoerror " https://wiki.ubuntu.com/Releases" @@ -1812,14 +1979,14 @@ elif [ "${DISTRO_NAME_L}" = "debian" ]; then __debian_codename_translation fi -if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|gentoo|red_hat|oracle|scientific|amazon|fedora|macosx)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then +if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(debian|ubuntu|centos|gentoo|red_hat|oracle|scientific|amazon|fedora|macosx|almalinux|rocky)')" = "" ] && [ "$ITYPE" = "stable" ] && [ "$STABLE_REV" != "latest" ]; then echoerror "${DISTRO_NAME} does not have major version pegged packages support" exit 1 fi # Only RedHat based distros have testing support if [ "${ITYPE}" = "testing" ]; then - if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle)')" = "" ]; then + if [ "$(echo "${DISTRO_NAME_L}" | grep -E '(centos|red_hat|amazon|oracle|almalinux|rocky)')" = "" ]; then echoerror "${DISTRO_NAME} does not have testing packages support" exit 1 fi @@ -1850,10 +2017,6 @@ if [ "$ITYPE" = "git" ]; then if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then _POST_NEON_INSTALL=$BS_TRUE __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" - if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then - # We do this to properly clone tags - GIT_REV="v${GIT_REV}" - fi echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" else __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed -E 's/^(v?[0-9]{1,4}\.[0-9]{1,2})(\.[0-9]{1,2})?.*$/MATCH/') @@ -1865,10 +2028,6 @@ if [ "$ITYPE" = "git" ]; then if [ "$__NEW_VS_TAG_REGEX_MATCH" = "MATCH" ]; then _POST_NEON_INSTALL=$BS_TRUE __TAG_REGEX_MATCH="${__NEW_VS_TAG_REGEX_MATCH}" - if [ "$(echo "${GIT_REV}" | cut -c -1)" != "v" ]; then - 
# We do this to properly clone tags - GIT_REV="v${GIT_REV}" - fi echodebug "Post Neon Tag Regex Match On: ${GIT_REV}" else __TAG_REGEX_MATCH=$(echo "${GIT_REV}" | sed 's/^.*\(v\?[[:digit:]]\{1,4\}\.[[:digit:]]\{1,2\}\)\(\.[[:digit:]]\{1,2\}\)\?.*$/MATCH/') @@ -2031,20 +2190,13 @@ __rpm_import_gpg() { #---------------------------------------------------------------------------------------------------------------------- __yum_install_noinput() { - ENABLE_EPEL_CMD="" - # Skip Amazon Linux for the first round, since EPEL is no longer required. - # See issue #724 - if [ $_DISABLE_REPOS -eq $BS_FALSE ] && [ "$DISTRO_NAME_L" != "amazon_linux_ami" ]; then - ENABLE_EPEL_CMD="--enablerepo=${_EPEL_REPO}" - fi - if [ "$DISTRO_NAME_L" = "oracle_linux" ]; then # We need to install one package at a time because --enablerepo=X disables ALL OTHER REPOS!!!! for package in "${@}"; do - yum -y install "${package}" || yum -y install "${package}" ${ENABLE_EPEL_CMD} || return $? + yum -y install "${package}" || yum -y install "${package}" || return $? done else - yum -y install "${@}" ${ENABLE_EPEL_CMD} || return $? + yum -y install "${@}" || return $? fi } # ---------- end of function __yum_install_noinput ---------- @@ -2057,6 +2209,15 @@ __dnf_install_noinput() { dnf -y install "${@}" || return $? } # ---------- end of function __dnf_install_noinput ---------- +#--- FUNCTION ------------------------------------------------------------------------------------------------------- +# NAME: __tdnf_install_noinput +# DESCRIPTION: (DRY) tdnf install with noinput options +#---------------------------------------------------------------------------------------------------------------------- +__tdnf_install_noinput() { + + tdnf -y install "${@}" || return $? 
+} # ---------- end of function __tdnf_install_noinput ---------- + #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __git_clone_and_checkout # DESCRIPTION: (DRY) Helper function to clone and checkout salt to a @@ -2582,7 +2743,7 @@ __activate_virtualenv() { # NAME: __install_pip_pkgs # DESCRIPTION: Return 0 or 1 if successfully able to install pip packages. Can provide a different python version to # install pip packages with. If $py_ver is not specified it will use the default python version. -# PARAMETERS: pkgs, py_ver +# PARAMETERS: pkgs, py_ver, upgrade #---------------------------------------------------------------------------------------------------------------------- __install_pip_pkgs() { @@ -2751,15 +2912,15 @@ EOM fi echodebug "Running '${_pip_cmd} install wheel ${_setuptools_dep}'" - ${_pip_cmd} install ${_POST_NEON_PIP_INSTALL_ARGS} wheel "${_setuptools_dep}" + ${_pip_cmd} install --upgrade ${_POST_NEON_PIP_INSTALL_ARGS} wheel "${_setuptools_dep}" echoinfo "Installing salt using ${_py_exe}" cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 mkdir /tmp/git/deps echoinfo "Downloading Salt Dependencies from PyPi" - echodebug "Running '${_pip_cmd} download -d /tmp/git/deps .'" - ${_pip_cmd} download -d /tmp/git/deps . || (echo "Failed to download salt dependencies" && return 1) + echodebug "Running '${_pip_cmd} download -d /tmp/git/deps ${_PIP_DOWNLOAD_ARGS} .'" + ${_pip_cmd} download -d /tmp/git/deps ${_PIP_DOWNLOAD_ARGS} . 
|| (echo "Failed to download salt dependencies" && return 1) echoinfo "Installing Downloaded Salt Dependencies" echodebug "Running '${_pip_cmd} install --ignore-installed ${_POST_NEON_PIP_INSTALL_ARGS} /tmp/git/deps/*'" @@ -2918,7 +3079,8 @@ __enable_universe_repository() { __install_saltstack_ubuntu_repository() { # Workaround for latest non-LTS Ubuntu if { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ - { [ "$DISTRO_MAJOR_VERSION" -eq 21 ] && [ "$DISTRO_MINOR_VERSION" -eq 04 ]; }; then + # remove 22 version when salt packages for 22.04 are available + [ "$DISTRO_MAJOR_VERSION" -eq 21 ] || [ "$DISTRO_MAJOR_VERSION" -eq 22 ]; then echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." UBUNTU_VERSION=20.04 UBUNTU_CODENAME="focal" @@ -2957,6 +3119,58 @@ __install_saltstack_ubuntu_repository() { __wait_for_apt apt-get update || return 1 } +__install_saltstack_ubuntu_onedir_repository() { + # Workaround for latest non-LTS Ubuntu + if { [ "$DISTRO_MAJOR_VERSION" -eq 20 ] && [ "$DISTRO_MINOR_VERSION" -eq 10 ]; } || \ + [ "$DISTRO_MAJOR_VERSION" -eq 21 ]; then + echowarn "Non-LTS Ubuntu detected, but stable packages requested. Trying packages for previous LTS release. You may experience problems." 
+ UBUNTU_VERSION=20.04 + UBUNTU_CODENAME="focal" + else + UBUNTU_VERSION=${DISTRO_VERSION} + UBUNTU_CODENAME=${DISTRO_CODENAME} + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Ubuntu 18+ + if [ "$DISTRO_MAJOR_VERSION" -ge 18 ]; then + __PACKAGES="${__PACKAGES} gnupg" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # SaltStack's stable Ubuntu repository: + SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/${ONEDIR_REV}/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + SALTSTACK_UBUNTU_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/ubuntu/${UBUNTU_VERSION}/${__REPO_ARCH}/" + fi + echo "$__REPO_ARCH_DEB $SALTSTACK_UBUNTU_URL $UBUNTU_CODENAME main" > /etc/apt/sources.list.d/salt.list + + if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ]; then + __apt_key_fetch "${SALTSTACK_UBUNTU_URL}salt-archive-keyring.gpg" || return 1 + elif [ "$(echo "${ONEDIR_REV}" | grep -E '(latest|nightly)')" != "" ]; then + __apt_key_fetch "${SALTSTACK_UBUNTU_URL}salt-archive-keyring.gpg" || \ + __apt_key_fetch "${SALTSTACK_UBUNTU_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 + else + __apt_key_fetch "${SALTSTACK_UBUNTU_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 + fi + + __wait_for_apt apt-get update || return 1 +} + install_ubuntu_deps() { if [ $_DISABLE_REPOS -eq $BS_FALSE ]; then # Install add-apt-repository @@ -3032,7 +3246,7 @@ install_ubuntu_stable_deps() { if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then - if [ 
"$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ] || [ "$DISTRO_MAJOR_VERSION" -ge 22 ]; then __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 else __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && @@ -3113,6 +3327,9 @@ install_ubuntu_git_deps() { fi else __PACKAGES="python${PY_PKG_VER}-dev python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + if [ "$DISTRO_MAJOR_VERSION" -ge 22 ]; then + __PACKAGES="${__PACKAGES} g++" + fi # shellcheck disable=SC2086 __apt_get_install_noinput ${__PACKAGES} || return 1 fi @@ -3126,6 +3343,44 @@ install_ubuntu_git_deps() { return 0 } +install_ubuntu_onedir_deps() { + if [ "${_SLEEP}" -eq "${__DEFAULT_SLEEP}" ] && [ "$DISTRO_MAJOR_VERSION" -lt 16 ]; then + # The user did not pass a custom sleep value as an argument, let's increase the default value + echodebug "On Ubuntu systems we increase the default sleep value to 10." + echodebug "See https://github.com/saltstack/salt/issues/12248 for more info." + _SLEEP=10 + fi + + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." 
+ fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 20 ] || [ "$DISTRO_MAJOR_VERSION" -ge 21 ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 + else + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_ubuntu_onedir_repository || return 1 + fi + + install_ubuntu_deps || return 1 +} + install_ubuntu_stable() { __PACKAGES="" @@ -3170,7 +3425,15 @@ install_ubuntu_git() { _POST_NEON_PIP_INSTALL_ARGS="" __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service + + # Account for new path for services files in later releases + if [ -d "pkg/common" ]; then + _SERVICE_DIR="pkg/common" + else + _SERVICE_DIR="pkg" + fi + + sed -i 's:/usr/bin:/usr/local/bin:g' ${_SERVICE_DIR}/*.service return 0 fi @@ -3185,6 +3448,28 @@ install_ubuntu_git() { return 0 } +install_ubuntu_onedir() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + install_ubuntu_stable_post() { for fname in api master 
minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -3220,8 +3505,15 @@ install_ubuntu_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" + fi + if [ -f /bin/systemctl ] && [ "$DISTRO_MAJOR_VERSION" -ge 16 ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -3236,8 +3528,8 @@ install_ubuntu_git_post() { if [ ! 
-f $_upstart_conf ]; then # upstart does not know about our service, let's copy the proper file echowarn "Upstart does not appear to know about salt-$fname" - echodebug "Copying ${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-$fname.upstart to $_upstart_conf" - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.upstart" "$_upstart_conf" + echodebug "Copying ${_SERVICE_DIR}/salt-$fname.upstart to $_upstart_conf" + __copyfile "${_SERVICE_DIR}/salt-${fname}.upstart" "$_upstart_conf" # Set service to know about virtualenv if [ "${_VIRTUALENV_DIR}" != "null" ]; then echo "SALT_USE_VIRTUALENV=${_VIRTUALENV_DIR}" > /etc/default/salt-${fname} @@ -3349,17 +3641,8 @@ install_ubuntu_check_services() { # Debian Install Functions # __install_saltstack_debian_repository() { - if [ "$DISTRO_MAJOR_VERSION" -eq 11 ]; then - # Packages for Debian 11 at repo.saltproject.io are not yet available - # Set up repository for Debian 10 for Debian 11 for now until support - # is available at repo.saltproject.io for Debian 11. - echowarn "Debian 11 distribution detected, but stable packages requested. Trying packages from Debian 10. You may experience problems." 
- DEBIAN_RELEASE="10" - DEBIAN_CODENAME="buster" - else - DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" - DEBIAN_CODENAME="$DISTRO_CODENAME" - fi + DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" + DEBIAN_CODENAME="$DISTRO_CODENAME" __PY_VERSION_REPO="apt" if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then @@ -3391,6 +3674,50 @@ __install_saltstack_debian_repository() { __wait_for_apt apt-get update || return 1 } +__install_saltstack_debian_onedir_repository() { + DEBIAN_RELEASE="$DISTRO_MAJOR_VERSION" + DEBIAN_CODENAME="$DISTRO_CODENAME" + + __PY_VERSION_REPO="apt" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Install downloader backend for GPG keys fetching + __PACKAGES='wget' + + # Required as it is not installed by default on Debian 9+ + if [ "$DISTRO_MAJOR_VERSION" -ge 9 ]; then + __PACKAGES="${__PACKAGES} gnupg2" + fi + + # Make sure https transport is available + if [ "$HTTP_VAL" = "https" ] ; then + __PACKAGES="${__PACKAGES} apt-transport-https ca-certificates" + fi + + # shellcheck disable=SC2086,SC2090 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + # amd64 is just a part of repository URI, 32-bit pkgs are hosted under the same location + SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/${ONEDIR_REV}/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + SALTSTACK_DEBIAN_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/debian/${DEBIAN_RELEASE}/${__REPO_ARCH}/" + fi + echo "$__REPO_ARCH_DEB $SALTSTACK_DEBIAN_URL $DEBIAN_CODENAME main" > "/etc/apt/sources.list.d/salt.list" + + if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ]; then + __apt_key_fetch "${SALTSTACK_DEBIAN_URL}salt-archive-keyring.gpg" || return 1 + elif [ "$(echo "${ONEDIR_REV}" | grep -E '(latest|nightly)')" != "" ]; then + __apt_key_fetch "${SALTSTACK_DEBIAN_URL}salt-archive-keyring.gpg" || \ + __apt_key_fetch 
"${SALTSTACK_DEBIAN_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 + else + __apt_key_fetch "${SALTSTACK_DEBIAN_URL}SALT-PROJECT-GPG-PUBKEY-2023.gpg" || return 1 + fi + + __wait_for_apt apt-get update || return 1 +} + install_debian_deps() { if [ $_START_DAEMONS -eq $BS_FALSE ]; then echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." @@ -3444,6 +3771,59 @@ install_debian_deps() { return 0 } +install_debian_onedir_deps() { + if [ $_START_DAEMONS -eq $BS_FALSE ]; then + echowarn "Not starting daemons on Debian based distributions is not working mostly because starting them is the default behaviour." + fi + + # No user interaction, libc6 restart services for example + export DEBIAN_FRONTEND=noninteractive + + __wait_for_apt apt-get update || return 1 + + if [ "${_UPGRADE_SYS}" -eq $BS_TRUE ]; then + # Try to update GPG keys first if allowed + if [ "${_INSECURE_DL}" -eq $BS_TRUE ]; then + if [ "$DISTRO_MAJOR_VERSION" -ge 10 ]; then + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && apt-get update || return 1 + else + __apt_get_install_noinput --allow-unauthenticated debian-archive-keyring && + apt-key update && apt-get update || return 1 + fi + fi + + __apt_get_upgrade_noinput || return 1 + fi + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + PY_PKG_VER=3 + else + PY_PKG_VER="" + fi + + # Additionally install procps and pciutils which allows for Docker bootstraps. 
See 366#issuecomment-39666813 + __PACKAGES='procps pciutils' + + # YAML module is used for generating custom master/minion configs + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-yaml" + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __check_dpkg_architecture || return 1 + __install_saltstack_debian_onedir_repository || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __apt_get_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 +} + install_debian_git_pre() { if ! __check_command_exists git; then __apt_get_install_noinput git || return 1 @@ -3692,7 +4072,15 @@ install_debian_git() { _POST_NEON_PIP_INSTALL_ARGS="" __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - sed -i 's:/usr/bin:/usr/local/bin:g' pkg/*.service + + # Account for new path for services files in later releases + if [ -d "pkg/common" ]; then + _SERVICE_DIR="pkg/common" + else + _SERVICE_DIR="pkg" + fi + + sed -i 's:/usr/bin:/usr/local/bin:g' ${_SERVICE_DIR}/*.service return 0 fi @@ -3720,6 +4108,28 @@ install_debian_9_git() { return 0 } +install_debian_onedir() { + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __apt_get_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + install_debian_git_post() { for fname in api master minion syndic; do # Skip if not meant to be installed @@ -3729,16 +4139,23 @@ 
install_debian_git_post() { [ "$fname" = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ "$fname" = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" + fi + # Configure SystemD for Debian 8 "Jessie" and later if [ -f /bin/systemctl ]; then if [ ! -f /lib/systemd/system/salt-${fname}.service ] || \ { [ -f /lib/systemd/system/salt-${fname}.service ] && [ $_FORCE_OVERWRITE -eq $BS_TRUE ]; }; then - if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.environment" "/etc/default/salt-${fname}" + if [ -f "${_SERVICE_DIR}/salt-${fname}.service" ]; then + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SERVICE_DIR}/salt-${fname}.environment" "/etc/default/salt-${fname}" else # workaround before adding Debian-specific unit files to the Salt main repo - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" /lib/systemd/system + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" /lib/systemd/system sed -i -e '/^Type/ s/notify/simple/' /lib/systemd/system/salt-${fname}.service fi fi @@ -3770,6 +4187,13 @@ install_debian_git_post() { done } +install_debian_2021_post() { + # Kali 2021 (debian derivative) disables all network services by default + # Using archlinux post function to enable salt systemd services + install_arch_linux_post || return 1 + return 0 +} + install_debian_restart_daemons() { [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 @@ -3826,6 +4250,41 @@ install_debian_check_services() { # Fedora Install Functions # +__install_saltstack_fedora_onedir_repository() { + if [ "$ITYPE" = 
"stable" ]; then + REPO_REV="$ONEDIR_REV" + else + REPO_REV="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + GPG_KEY="SALT-PROJECT-GPG-PUBKEY-2023.pub" + + REPO_FILE="/etc/yum.repos.d/salt.repo" + + if [ ! -s "$REPO_FILE" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/fedora/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/fedora/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" + fi + + __fetch_url "${REPO_FILE}" "${FETCH_URL}.repo" + + __rpm_import_gpg "${FETCH_URL}/${GPG_KEY}" || return 1 + + yum clean metadata || return 1 + elif [ "$REPO_REV" != "latest" ]; then + echowarn "salt.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $REPO_REV." + fi + + return 0 +} + install_fedora_deps() { if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then dnf -y update || return 1 @@ -3985,6 +4444,9 @@ install_fedora_git_deps() { done else __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc" + if [ "${DISTRO_VERSION}" -ge 35 ]; then + __PACKAGES="${__PACKAGES} gcc-c++" + fi # shellcheck disable=SC2086 __dnf_install_noinput ${__PACKAGES} || return 1 fi @@ -4028,7 +4490,18 @@ install_fedora_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" + 
fi + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Salt executables are located under `/usr/local/bin/` on Fedora 36+ + #if [ "${DISTRO_VERSION}" -ge 36 ]; then + # sed -i -e 's:/usr/bin/:/usr/local/bin/:g' /lib/systemd/system/salt-*.service + #fi # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -4076,6 +4549,83 @@ install_fedora_check_services() { return 0 } + +install_fedora_onedir_deps() { + + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_saltstack_fedora_onedir_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. This call has + # its own check in case -r was passed without -R. 
+ if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_fedora_onedir_repository || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="dnf-utils chkconfig" + else + __PACKAGES="yum-utils chkconfig" + fi + + __PACKAGES="${__PACKAGES} procps" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 + +} + + +install_fedora_onedir() { + STABLE_REV=$ONEDIR_REV + #install_fedora_stable || return 1 + + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_fedora_onedir_post() { + STABLE_REV=$ONEDIR_REV + install_fedora_stable_post || return 1 + + return 0 +} # # Ended Fedora Install Functions # @@ -4085,27 +4635,13 @@ install_fedora_check_services() { # # CentOS Install Functions # -__install_epel_repository() { - if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_TRUE ]; then - return 0 - fi - - # Check if epel repo is already enabled and flag it accordingly - if yum repolist | grep -q "^[!]\\?${_EPEL_REPO}/"; then - _EPEL_REPOS_INSTALLED=$BS_TRUE - return 0 - fi - - # Download latest 'epel-release' package for the distro version directly - epel_repo_url="${HTTP_VAL}://dl.fedoraproject.org/pub/epel/epel-release-latest-${DISTRO_MAJOR_VERSION}.noarch.rpm" - rpm -Uvh --force "$epel_repo_url" || return 1 - - _EPEL_REPOS_INSTALLED=$BS_TRUE - - return 0 -} - 
__install_saltstack_rhel_repository() { + if [ "${DISTRO_MAJOR_VERSION}" -ge 9 ]; then + echoerror "Old stable repository unavailable on RH variants greater than or equal to 9" + echoerror "Use the stable install type." + exit 1 + fi + if [ "$ITYPE" = "stable" ]; then repo_rev="$STABLE_REV" else @@ -4120,7 +4656,19 @@ __install_saltstack_rhel_repository() { # Avoid using '$releasever' variable for yum. # Instead, this should work correctly on all RHEL variants. base_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${repo_rev}/" - gpg_key="SALTSTACK-GPG-KEY.pub" + if [ "${DISTRO_MAJOR_VERSION}" -eq 7 ]; then + gpg_key="SALTSTACK-GPG-KEY.pub base/RPM-GPG-KEY-CentOS-7" + elif [ "${DISTRO_MAJOR_VERSION}" -ge 9 ]; then + gpg_key="SALTSTACK-GPG-KEY2.pub" + else + gpg_key="SALTSTACK-GPG-KEY.pub" + fi + + gpg_key_urls="" + for key in $gpg_key; do + gpg_key_urls=$(printf "${base_url}${key},%s" "$gpg_key_urls") + done + repo_file="/etc/yum.repos.d/salt.repo" if [ ! -s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then @@ -4130,13 +4678,80 @@ name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever baseurl=${base_url} skip_if_unavailable=True gpgcheck=1 -gpgkey=${base_url}${gpg_key} +gpgkey=${gpg_key_urls} enabled=1 enabled_metadata=1 _eof fetch_url="${HTTP_VAL}://${_REPO_URL}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${repo_rev}/" - __rpm_import_gpg "${fetch_url}${gpg_key}" || return 1 + for key in $gpg_key; do + __rpm_import_gpg "${fetch_url}${key}" || return 1 + done + + yum clean metadata || return 1 + elif [ "$repo_rev" != "latest" ]; then + echowarn "salt.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $repo_rev." 
+ fi + + return 0 +} + +__install_saltstack_rhel_onedir_repository() { + if [ "$ITYPE" = "stable" ]; then + repo_rev="$ONEDIR_REV" + else + repo_rev="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + # Avoid using '$releasever' variable for yum. + # Instead, this should work correctly on all RHEL variants. + base_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/${ONEDIR_REV}/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + base_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/\$basearch/" + fi + if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ] || [ "${ONEDIR_REV}" = "nightly" ]; then + if [ "${DISTRO_MAJOR_VERSION}" -eq 9 ]; then + gpg_key="SALTSTACK-GPG-KEY2.pub" + else + gpg_key="SALTSTACK-GPG-KEY.pub" + fi + else + gpg_key="SALT-PROJECT-GPG-PUBKEY-2023.pub" + fi + + gpg_key_urls="" + for key in $gpg_key; do + gpg_key_urls=$(printf "${base_url}${key},%s" "$gpg_key_urls") + done + + repo_file="/etc/yum.repos.d/salt.repo" + + if [ ! 
-s "$repo_file" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + cat <<_eof > "$repo_file" +[saltstack] +name=SaltStack ${repo_rev} Release Channel for RHEL/CentOS \$releasever +baseurl=${base_url} +skip_if_unavailable=True +gpgcheck=1 +gpgkey=${gpg_key_urls} +enabled=1 +enabled_metadata=1 +_eof + + fetch_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + fetch_url="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/redhat/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" + fi + for key in $gpg_key; do + __rpm_import_gpg "${fetch_url}${key}" || return 1 + done + yum clean metadata || return 1 elif [ "$repo_rev" != "latest" ]; then echowarn "salt.repo already exists, ignoring salt version argument." @@ -4158,7 +4773,6 @@ install_centos_stable_deps() { fi if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then - __install_epel_repository || return 1 __install_saltstack_rhel_repository || return 1 fi @@ -4179,27 +4793,29 @@ install_centos_stable_deps() { if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python3-pyyaml" + __PACKAGES="${__PACKAGES} python3-pyyaml python3-setuptools" else __PACKAGES="${__PACKAGES} python2-pyyaml" fi elif [ "$DISTRO_MAJOR_VERSION" -eq 7 ]; then # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python36-PyYAML" + __PACKAGES="${__PACKAGES} python36-PyYAML python36-setuptools" else __PACKAGES="${__PACKAGES} PyYAML" fi else # YAML module is used for generating custom master/minion configs if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then - __PACKAGES="${__PACKAGES} python34-PyYAML" + __PACKAGES="${__PACKAGES} python34-PyYAML python34-setuptools" else 
__PACKAGES="${__PACKAGES} PyYAML" fi fi fi + __PACKAGES="${__PACKAGES} procps" + # shellcheck disable=SC2086 __yum_install_noinput ${__PACKAGES} || return 1 @@ -4216,40 +4832,29 @@ install_centos_stable_deps() { install_centos_stable() { __PACKAGES="" - local cloud='salt-cloud' - local master='salt-master' - local minion='salt-minion' - local syndic='salt-syndic' - - if echo "$STABLE_REV" | grep -q "archive";then # point release being applied - local ver=$(echo "$STABLE_REV"|awk -F/ '{print $2}') # strip archive/ - elif echo "$STABLE_REV" | egrep -vq "archive|latest";then # latest or major version(3003, 3004, etc) being applie - local ver=$STABLE_REV - fi - - if [ ! -z $ver ]; then - cloud+="-$ver" - master+="-$ver" - minion+="-$ver" - syndic+="-$ver" - fi - if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $cloud" + __PACKAGES="${__PACKAGES} salt-cloud" fi if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $master" + __PACKAGES="${__PACKAGES} salt-master" fi if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then - __PACKAGES="${__PACKAGES} $minion" + __PACKAGES="${__PACKAGES} salt-minion" fi if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then - __PACKAGES="${__PACKAGES} $syndic" + __PACKAGES="${__PACKAGES} salt-syndic" fi # shellcheck disable=SC2086 __yum_install_noinput ${__PACKAGES} || return 1 + # Workaround for 3.11 broken on CentOS Stream 8.x + # Re-install Python 3.6 + _py_version=$(${_PY_EXE} -c "import sys; print('{0}.{1}'.format(*sys.version_info))") + if [ "$DISTRO_MAJOR_VERSION" -eq 8 ] && [ "${_py_version}" = "3.11" ]; then + __yum_install_noinput python3 + fi + return 0 } @@ -4285,7 +4890,14 @@ install_centos_stable_post() { } install_centos_git_deps() { - install_centos_stable_deps || return 1 + # First try stable deps then fall back to onedir deps if that one fails + # if we're installing on a Red Hat based host that doesn't have the classic + # package repos available. 
+ # Set ONEDIR_REV to STABLE_REV in case we + # end up calling install_centos_onedir_deps + ONEDIR_REV=${STABLE_REV} + install_centos_onedir_deps || \ + return 1 if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then __yum_install_noinput ca-certificates || return 1 @@ -4445,10 +5057,16 @@ install_centos_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_FILE="${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" + else + _SERVICE_FILE="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" + fi if [ -f /bin/systemctl ]; then if [ ! -f "/usr/lib/systemd/system/salt-${fname}.service" ] || \ { [ -f "/usr/lib/systemd/system/salt-${fname}.service" ] && [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; }; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" /usr/lib/systemd/system + __copyfile "${_SERVICE_FILE}" /usr/lib/systemd/system fi SYSTEMD_RELOAD=$BS_TRUE @@ -4468,6 +5086,117 @@ install_centos_git_post() { return 0 } +install_centos_onedir_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." 
+ fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_saltstack_rhel_onedir_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. This call has + # its own check in case -r was passed without -R. + if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_rhel_onedir_repository || return 1 + fi + + if [ "$DISTRO_MAJOR_VERSION" -ge 8 ]; then + __PACKAGES="dnf-utils chkconfig" + else + __PACKAGES="yum-utils chkconfig" + fi + + __PACKAGES="${__PACKAGES} procps" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + + return 0 +} + +# This function has been modified to allow for specific versions to be installed +# when not using the salt repo +install_centos_onedir() { + __PACKAGES="" + + local cloud='salt-cloud' + local master='salt-master' + local minion='salt-minion' + local syndic='salt-syndic' + local ver="$_ONEDIR_REV" + + if [ ! 
-z $ver ]; then + cloud+="-$ver" + master+="-$ver" + minion+="-$ver" + syndic+="-$ver" + fi + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} $cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} $master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} $minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} $syndic" + fi + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_centos_onedir_post() { + SYSTEMD_RELOAD=$BS_FALSE + + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f /bin/systemctl ]; then + /bin/systemctl is-enabled salt-${fname}.service > /dev/null 2>&1 || ( + /bin/systemctl preset salt-${fname}.service > /dev/null 2>&1 && + /bin/systemctl enable salt-${fname}.service > /dev/null 2>&1 + ) + + SYSTEMD_RELOAD=$BS_TRUE + elif [ -f "/etc/init.d/salt-${fname}" ]; then + /sbin/chkconfig salt-${fname} on + fi + done + + if [ "$SYSTEMD_RELOAD" -eq $BS_TRUE ]; then + /bin/systemctl daemon-reload + fi + + return 0 +} + install_centos_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -4567,6 +5296,11 @@ install_red_hat_linux_git_deps() { return 0 } +install_red_hat_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + install_red_hat_enterprise_stable_deps() { install_red_hat_linux_stable_deps || return 1 return 0 @@ -4577,6 +5311,11 @@ install_red_hat_enterprise_git_deps() { return 0 } +install_red_hat_enterprise_onedir_deps() { + 
install_red_hat_linux_onedir_deps || return 1
+    return 0
+}
+
 install_red_hat_enterprise_linux_stable_deps() {
     install_red_hat_linux_stable_deps || return 1
     return 0
@@ -4587,6 +5326,11 @@ install_red_hat_enterprise_linux_git_deps() {
     return 0
 }
 
+install_red_hat_enterprise_linux_onedir_deps() {
+    install_red_hat_linux_onedir_deps || return 1
+    return 0
+}
+
 install_red_hat_enterprise_server_stable_deps() {
     install_red_hat_linux_stable_deps || return 1
     return 0
@@ -4597,6 +5341,11 @@ install_red_hat_enterprise_server_git_deps() {
     return 0
 }
 
+install_red_hat_enterprise_server_onedir_deps() {
+    install_red_hat_linux_onedir_deps || return 1
+    return 0
+}
+
 install_red_hat_enterprise_workstation_stable_deps() {
     install_red_hat_linux_stable_deps || return 1
     return 0
@@ -4607,6 +5356,11 @@ install_red_hat_enterprise_workstation_git_deps() {
     return 0
 }
 
+install_red_hat_enterprise_workstation_onedir_deps() {
+    install_red_hat_linux_onedir_deps || return 1
+    return 0
+}
+
 install_red_hat_linux_stable() {
     install_centos_stable || return 1
     return 0
@@ -4617,6 +5371,11 @@ install_red_hat_linux_git() {
     return 0
 }
 
+install_red_hat_linux_onedir() {
+    install_centos_onedir || return 1
+    return 0
+}
+
 install_red_hat_enterprise_stable() {
     install_red_hat_linux_stable || return 1
     return 0
@@ -4627,6 +5386,11 @@ install_red_hat_enterprise_git() {
     return 0
 }
 
+install_red_hat_enterprise_onedir() {
+    install_red_hat_linux_onedir || return 1
+    return 0
+}
+
 install_red_hat_enterprise_linux_stable() {
     install_red_hat_linux_stable || return 1
     return 0
@@ -4637,6 +5401,11 @@ install_red_hat_enterprise_linux_git() {
     return 0
 }
 
+install_red_hat_enterprise_linux_onedir() {
+    install_red_hat_linux_onedir || return 1
+    return 0
+}
+
 install_red_hat_enterprise_server_stable() {
     install_red_hat_linux_stable || return 1
     return 0
@@ -4647,6 +5416,11 @@ install_red_hat_enterprise_server_git() {
     return 0
 }
 
+install_red_hat_enterprise_server_onedir() {
+    install_red_hat_linux_onedir || return 1
+    return 
0 +} + install_red_hat_enterprise_workstation_stable() { install_red_hat_linux_stable || return 1 return 0 @@ -4657,6 +5431,11 @@ install_red_hat_enterprise_workstation_git() { return 0 } +install_red_hat_enterprise_workstation_onedir() { + install_red_hat_linux_onedir || return 1 + return 0 +} + install_red_hat_linux_stable_post() { install_centos_stable_post || return 1 return 0 @@ -4801,6 +5580,15 @@ install_red_hat_enterprise_workstation_testing_post() { # Oracle Linux Install Functions # install_oracle_linux_stable_deps() { + # Install Oracle's EPEL. + if [ ${_EPEL_REPOS_INSTALLED} -eq $BS_FALSE ]; then + _EPEL_REPO=oracle-epel-release-el${DISTRO_MAJOR_VERSION} + if ! rpm -q "${_EPEL_REPO}" > /dev/null; then + __yum_install_noinput "${_EPEL_REPO}" + fi + _EPEL_REPOS_INSTALLED=$BS_TRUE + fi + install_centos_stable_deps || return 1 return 0 } @@ -4810,6 +5598,11 @@ install_oracle_linux_git_deps() { return 0 } +install_oracle_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + install_oracle_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -4825,6 +5618,11 @@ install_oracle_linux_git() { return 0 } +install_oracle_linux_onedir() { + install_centos_onedir || return 1 + return 0 +} + install_oracle_linux_testing() { install_centos_testing || return 1 return 0 @@ -4840,6 +5638,11 @@ install_oracle_linux_git_post() { return 0 } +install_oracle_linux_onedir_post() { + install_centos_onedir_post || return 1 + return 0 +} + install_oracle_linux_testing_post() { install_centos_testing_post || return 1 return 0 @@ -4859,6 +5662,162 @@ install_oracle_linux_check_services() { # ####################################################################################################################### +####################################################################################################################### +# +# AlmaLinux Install Functions +# +install_almalinux_stable_deps() { + install_centos_stable_deps || 
return 1 + return 0 +} + +install_almalinux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_almalinux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + +install_almalinux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_almalinux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_almalinux_git() { + install_centos_git || return 1 + return 0 +} + +install_almalinux_onedir() { + install_centos_onedir || return 1 + return 0 +} + +install_almalinux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_almalinux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_almalinux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_almalinux_onedir_post() { + install_centos_onedir_post || return 1 + return 0 +} + +install_almalinux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_almalinux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_almalinux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended AlmaLinux Install Functions +# +####################################################################################################################### + +####################################################################################################################### +# +# Rocky Linux Install Functions +# +install_rocky_linux_stable_deps() { + install_centos_stable_deps || return 1 + return 0 +} + +install_rocky_linux_git_deps() { + install_centos_git_deps || return 1 + return 0 +} + +install_rocky_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + +install_rocky_linux_testing_deps() { + install_centos_testing_deps || return 1 + return 0 +} + +install_rocky_linux_stable() { + install_centos_stable || return 1 + return 0 +} + +install_rocky_linux_onedir() { + 
install_centos_onedir || return 1 + return 0 +} + +install_rocky_linux_git() { + install_centos_git || return 1 + return 0 +} + +install_rocky_linux_testing() { + install_centos_testing || return 1 + return 0 +} + +install_rocky_linux_stable_post() { + install_centos_stable_post || return 1 + return 0 +} + +install_rocky_linux_git_post() { + install_centos_git_post || return 1 + return 0 +} + +install_rocky_linux_onedir_post() { + install_centos_onedir_post || return 1 + return 0 +} + +install_rocky_linux_testing_post() { + install_centos_testing_post || return 1 + return 0 +} + +install_rocky_linux_restart_daemons() { + install_centos_restart_daemons || return 1 + return 0 +} + +install_rocky_linux_check_services() { + install_centos_check_services || return 1 + return 0 +} +# +# Ended Rocky Linux Install Functions +# +####################################################################################################################### + ####################################################################################################################### # # Scientific Linux Install Functions @@ -4873,6 +5832,11 @@ install_scientific_linux_git_deps() { return 0 } +install_scientific_linux_onedir_deps() { + install_centos_onedir_deps || return 1 + return 0 +} + install_scientific_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -4888,6 +5852,11 @@ install_scientific_linux_git() { return 0 } +install_scientific_linux_onedir() { + install_centos_onedir || return 1 + return 0 +} + install_scientific_linux_testing() { install_centos_testing || return 1 return 0 @@ -4903,6 +5872,11 @@ install_scientific_linux_git_post() { return 0 } +install_scientific_linux_onedir_post() { + install_centos_onedir_post || return 1 + return 0 +} + install_scientific_linux_testing_post() { install_centos_testing_post || return 1 return 0 @@ -4936,6 +5910,11 @@ install_cloud_linux_git_deps() { return 0 } +install_cloud_linux_onedir_deps() { + 
install_centos_onedir_deps || return 1 + return 0 +} + install_cloud_linux_testing_deps() { install_centos_testing_deps || return 1 return 0 @@ -5029,8 +6008,8 @@ install_alpine_linux_git_deps() { fi fi else - apk -U add python2 py2-pip py2-setuptools || return 1 - _PY_EXE=python2 + apk -U add python3 python3-dev py3-pip py3-setuptools g++ linux-headers zeromq-dev openrc || return 1 + _PY_EXE=python3 return 0 fi @@ -5500,6 +6479,100 @@ _eof fi } +install_amazon_linux_ami_2_onedir_deps() { + # Shim to figure out if we're using old (rhel) or new (aws) rpms. + _USEAWS=$BS_FALSE + pkg_append="python" + + if [ "$ITYPE" = "onedir" ]; then + repo_rev="$ONEDIR_REV" + else + repo_rev="latest" + fi + + if echo $repo_rev | grep -E -q '^archive'; then + year=$(echo "$repo_rev" | cut -d '/' -f 2 | cut -c1-4) + else + year=$(echo "$repo_rev" | cut -c1-4) + fi + + # We need to install yum-utils before doing anything else when installing on + # Amazon Linux ECS-optimized images. See issue #974. + __yum_install_noinput yum-utils + + # Do upgrade early + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + yum -y update || return 1 + fi + + if [ $_DISABLE_REPOS -eq $BS_FALSE ] || [ "$_CUSTOM_REPO_URL" != "null" ]; then + __REPO_FILENAME="salt.repo" + __PY_VERSION_REPO="yum" + PY_PKG_VER="" + repo_label="saltstack-repo" + repo_name="SaltStack repo for Amazon Linux 2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __REPO_FILENAME="salt.repo" + __PY_VERSION_REPO="py3" + PY_PKG_VER=3 + repo_label="saltstack-py3-repo" + repo_name="SaltStack Python 3 repo for Amazon Linux 2" + fi + + base_url="$HTTP_VAL://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/amazon/2/\$basearch/$repo_rev/" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + base_url="$HTTP_VAL://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/amazon/2/\$basearch/" + fi + + if [ "$(echo "${ONEDIR_REV}" | grep -E '(3004|3005)')" != "" ] || [ "${ONEDIR_REV}" = "nightly" ]; then + 
gpg_key="${base_url}SALTSTACK-GPG-KEY.pub,${base_url}base/RPM-GPG-KEY-CentOS-7" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + gpg_key="${base_url}SALTSTACK-GPG-KEY.pub" + fi + else + gpg_key="${base_url}SALT-PROJECT-GPG-PUBKEY-2023.pub" + fi + + # This should prob be refactored to use __install_saltstack_rhel_repository() + # With args passed in to do the right thing. Reformatted to be more like the + # amazon linux yum file. + if [ ! -s "/etc/yum.repos.d/${__REPO_FILENAME}" ]; then + cat <<_eof > "/etc/yum.repos.d/${__REPO_FILENAME}" +[$repo_label] +name=$repo_name +failovermethod=priority +priority=10 +gpgcheck=1 +gpgkey=$gpg_key +baseurl=$base_url +_eof + fi + + fi + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + # Package python-ordereddict-1.1-2.el6.noarch is obsoleted by python26-2.6.9-2.88.amzn1.x86_64 + # which is already installed + if [ -n "${PY_PKG_VER}" ] && [ "${PY_PKG_VER}" -eq 3 ]; then + __PACKAGES="${pkg_append}${PY_PKG_VER}-m2crypto ${pkg_append}${PY_PKG_VER}-pyyaml" + else + __PACKAGES="m2crypto PyYAML ${pkg_append}-futures" + fi + + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-crypto ${pkg_append}${PY_PKG_VER}-jinja2 procps-ng" + __PACKAGES="${__PACKAGES} ${pkg_append}${PY_PKG_VER}-msgpack ${pkg_append}${PY_PKG_VER}-requests ${pkg_append}${PY_PKG_VER}-zmq" + + # shellcheck disable=SC2086 + __yum_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __yum_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi +} + install_amazon_linux_ami_stable() { install_centos_stable || return 1 return 0 @@ -5575,6 +6648,16 @@ install_amazon_linux_ami_2_check_services() { return 0 } +install_amazon_linux_ami_2_onedir() { + install_centos_stable || return 1 + return 0 +} + +install_amazon_linux_ami_2_onedir_post() { + install_centos_stable_post || return 1 + return 0 +} + # # 
Ended Amazon Linux AMI Install Functions # @@ -5666,6 +6749,10 @@ install_arch_linux_git_deps() { return 0 } +install_arch_linux_onedir_deps() { + install_arch_linux_stable_deps || return 1 +} + install_arch_linux_stable() { # Pacman does not resolve dependencies on outdated versions # They always need to be updated @@ -5684,6 +6771,8 @@ install_arch_linux_stable() { install_arch_linux_git() { + _POST_NEON_PIP_INSTALL_ARGS="${_POST_NEON_PIP_INSTALL_ARGS} --use-pep517" + _PIP_DOWNLOAD_ARGS="${_PIP_DOWNLOAD_ARGS} --use-pep517" if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then __install_salt_from_repo_post_neon "${_PY_EXE}" || return 1 return 0 @@ -5741,8 +6830,15 @@ install_arch_linux_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" + fi + if [ -f /usr/bin/systemctl ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -5809,11 +6905,344 @@ install_arch_check_services() { return 0 } + +install_arch_linux_onedir() { + install_arch_linux_stable || return 1 + + return 0 +} + +install_arch_linux_onedir_post() { + install_arch_linux_post || return 1 + + return 0 +} # # Ended Arch Install Functions # ####################################################################################################################### 
+####################################################################################################################### +# +# Photon OS Install Functions +# + +__install_saltstack_photon_onedir_repository() { + if [ "$ITYPE" = "stable" ]; then + REPO_REV="$ONEDIR_REV" + else + REPO_REV="latest" + fi + + __PY_VERSION_REPO="yum" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + REPO_FILE="/etc/yum.repos.d/salt.repo" + + if [ ! -s "$REPO_FILE" ] || [ "$_FORCE_OVERWRITE" -eq $BS_TRUE ]; then + FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/photon/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/${ONEDIR_REV}" + if [ "${ONEDIR_REV}" = "nightly" ] ; then + FETCH_URL="${HTTP_VAL}://${_REPO_URL}/${_ONEDIR_NIGHTLY_DIR}/${__PY_VERSION_REPO}/photon/${DISTRO_MAJOR_VERSION}/${CPU_ARCH_L}/" + fi + + __fetch_url "${REPO_FILE}" "${FETCH_URL}.repo" + + GPG_KEY="SALT-PROJECT-GPG-PUBKEY-2023.pub" + + __rpm_import_gpg "${FETCH_URL}/${GPG_KEY}" || return 1 + + tdnf makecache || return 1 + elif [ "$REPO_REV" != "latest" ]; then + echowarn "salt.repo already exists, ignoring salt version argument." + echowarn "Use -F (forced overwrite) to install $REPO_REV." 
+ fi + + return 0 +} + +install_photon_deps() { + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + tdnf -y update || return 1 + fi + + __PACKAGES="${__PACKAGES:=}" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -lt 3 ]; then + echoerror "There are no Python 2 stable packages for Fedora, only Py3 packages" + return 1 + fi + + PY_PKG_VER=3 + + __PACKAGES="${__PACKAGES} libyaml procps-ng python${PY_PKG_VER}-crypto python${PY_PKG_VER}-jinja2" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-msgpack python${PY_PKG_VER}-requests python${PY_PKG_VER}-zmq" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-pip python${PY_PKG_VER}-m2crypto python${PY_PKG_VER}-pyyaml" + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-systemd" + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + fi + + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} ${_EXTRA_PACKAGES} || return 1 + + return 0 +} + +install_photon_stable_post() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_photon_git_deps() { + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + # Packages are named python3- + PY_PKG_VER=3 + else + PY_PKG_VER=2 + fi + + __PACKAGES="" + if ! __check_command_exists ps; then + __PACKAGES="${__PACKAGES} procps-ng" + fi + if ! 
__check_command_exists git; then + __PACKAGES="${__PACKAGES} git" + fi + + if [ -n "${__PACKAGES}" ]; then + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} || return 1 + __PACKAGES="" + fi + + __git_clone_and_checkout || return 1 + + if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then + + if [ "$_INSECURE_DL" -eq $BS_FALSE ] && [ "${_SALT_REPO_URL%%://*}" = "https" ]; then + __PACKAGES="${__PACKAGES} ca-certificates" + fi + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} python${PY_PKG_VER}-libcloud python${PY_PKG_VER}-netaddr" + fi + + install_photon_deps || return 1 + + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + if __check_command_exists python3; then + __python="python3" + fi + elif [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 2 ]; then + if __check_command_exists python2; then + __python="python2" + fi + else + if ! __check_command_exists python; then + echoerror "Unable to find a python binary?!" + return 1 + fi + # Let's hope it's the right one + __python="python" + fi + + grep tornado "${_SALT_GIT_CHECKOUT_DIR}/requirements/base.txt" | while IFS=' + ' read -r dep; do + echodebug "Running '${__python}' -m pip install '${dep}'" + "${__python}" -m pip install "${dep}" || return 1 + done + else + __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc glibc-devel linux-devel.x86_64" + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} || return 1 + fi + + if [ "${DISTRO_MAJOR_VERSION}" -gt 3 ]; then + # Need newer version of setuptools on Photon + _setuptools_dep="setuptools>=${_MINIMUM_SETUPTOOLS_VERSION}" + echodebug "Running '${_PY_EXE} -m pip --upgrade install ${_setuptools_dep}'" + ${_PY_EXE} -m pip install --upgrade "${_setuptools_dep}" + fi + + # Let's trigger config_salt() + if [ "$_TEMP_CONFIG_DIR" = "null" ]; then + _TEMP_CONFIG_DIR="${_SALT_GIT_CHECKOUT_DIR}/conf/" + CONFIG_SALT_FUNC="config_salt" + fi + + return 0 +} + 
+install_photon_git() { + if [ "${_PY_EXE}" != "" ]; then + _PYEXE=${_PY_EXE} + echoinfo "Using the following python version: ${_PY_EXE} to install salt" + else + _PYEXE='python2' + fi + + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then + ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + else + ${_PYEXE} setup.py ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 + fi + return 0 +} + +install_photon_git_post() { + for fname in api master minion syndic; do + # Skip if not meant to be installed + [ $fname = "api" ] && \ + ([ "$_INSTALL_MASTER" -eq $BS_FALSE ] || ! __check_command_exists "salt-${fname}") && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/rpm" + fi + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + + # Salt executables are located under `/usr/local/bin/` on Fedora 36+ + #if [ "${DISTRO_VERSION}" -ge 36 ]; then + # sed -i -e 's:/usr/bin/:/usr/local/bin/:g' /lib/systemd/system/salt-*.service + #fi + + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + systemctl is-enabled salt-$fname.service || (systemctl preset salt-$fname.service && systemctl enable salt-$fname.service) + sleep 1 + systemctl daemon-reload + done +} + +install_photon_restart_daemons() { + [ $_START_DAEMONS -eq $BS_FALSE ] && return + + for fname in api master minion syndic; do + # Skip salt-api since the service 
should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + systemctl stop salt-$fname > /dev/null 2>&1 + systemctl start salt-$fname.service && continue + echodebug "Failed to start salt-$fname using systemd" + if [ "$_ECHO_DEBUG" -eq $BS_TRUE ]; then + systemctl status salt-$fname.service + journalctl -xe + fi + done +} + +install_photon_check_services() { + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + __check_services_systemd salt-$fname || return 1 + done + + return 0 +} + +install_photon_onedir_deps() { + + if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then + tdnf -y update || return 1 + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_TRUE" ] && [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + echowarn "Detected -r or -R option while installing Salt packages for Python 3." + echowarn "Python 3 packages for older Salt releases requires the EPEL repository to be installed." + echowarn "Installing the EPEL repository automatically is disabled when using the -r or -R options." + fi + + if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then + __install_saltstack_photon_onedir_repository || return 1 + fi + + # If -R was passed, we need to configure custom repo url with rsync-ed packages + # Which is still handled in __install_saltstack_rhel_repository. 
This call has + # its own check in case -r was passed without -R. + if [ "$_CUSTOM_REPO_URL" != "null" ]; then + __install_saltstack_photon_onedir_repository || return 1 + fi + + __PACKAGES="procps-ng" + + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} || return 1 + + if [ "${_EXTRA_PACKAGES}" != "" ]; then + echoinfo "Installing the following extra packages as requested: ${_EXTRA_PACKAGES}" + # shellcheck disable=SC2086 + __tdnf_install_noinput ${_EXTRA_PACKAGES} || return 1 + fi + + return 0 + +} + + +install_photon_onedir() { + STABLE_REV=$ONEDIR_REV + + __PACKAGES="" + + if [ "$_INSTALL_CLOUD" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-cloud" + fi + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-master" + fi + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + __PACKAGES="${__PACKAGES} salt-minion" + fi + if [ "$_INSTALL_SYNDIC" -eq $BS_TRUE ];then + __PACKAGES="${__PACKAGES} salt-syndic" + fi + + # shellcheck disable=SC2086 + __tdnf_install_noinput ${__PACKAGES} || return 1 + + return 0 +} + +install_photon_onedir_post() { + STABLE_REV=$ONEDIR_REV + install_photon_stable_post || return 1 + + return 0 +} +# +# Ended Fedora Install Functions +# +####################################################################################################################### + ####################################################################################################################### # # FreeBSD Install Functions @@ -5841,15 +7270,15 @@ install_freebsd_git_deps() { if [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py38-salt) + SALT_DEPENDENCIES=$(/usr/local/sbin/pkg rquery %dn py39-salt) # shellcheck disable=SC2086 /usr/local/sbin/pkg install -y ${SALT_DEPENDENCIES} python || return 1 - /usr/local/sbin/pkg install -y py38-requests || return 1 - /usr/local/sbin/pkg install -y py38-tornado4 || return 1 + /usr/local/sbin/pkg install -y py39-requests || 
return 1 + /usr/local/sbin/pkg install -y py39-tornado4 || return 1 else - /usr/local/sbin/pkg install -y python py38-pip py38-setuptools libzmq4 libunwind || return 1 + /usr/local/sbin/pkg install -y python py39-pip py39-setuptools libzmq4 libunwind || return 1 fi echodebug "Adapting paths to FreeBSD" @@ -5895,7 +7324,7 @@ install_freebsd_stable() { # installing latest version of salt from FreeBSD CURRENT ports repo # # shellcheck disable=SC2086 - /usr/local/sbin/pkg install -y py38-salt || return 1 + /usr/local/sbin/pkg install -y py39-salt || return 1 return 0 } @@ -5987,6 +7416,15 @@ install_freebsd_restart_daemons() { service salt_$fname start done } + +install_freebsd_onedir() { +# +# call install_freebsd_stable +# + install_freebsd_stable || return 1 + + return 0 +} # # Ended FreeBSD Install Functions # @@ -6021,7 +7459,7 @@ install_openbsd_git_deps() { __git_clone_and_checkout || return 1 if [ "${_POST_NEON_INSTALL}" -eq $BS_TRUE ]; then - pkg_add -I -v py-pip py-setuptools + pkg_add -I -v py3-pip py3-setuptools fi # @@ -6105,6 +7543,14 @@ install_openbsd_restart_daemons() { return 0 } +install_openbsd_onedir() { +# +# Call install_openbsd_stable +# + install_openbsd_stable || return 1 + + return 0 +} # # Ended OpenBSD Install Functions # @@ -6305,6 +7751,14 @@ install_smartos_restart_daemons() { return 0 } +install_smartos_onedir() { +# +# call install_smartos_stable +# + install_smartos_stable || return 1 + + return 0 +} # # Ended SmartOS Install Functions # @@ -6321,19 +7775,16 @@ __set_suse_pkg_repo() { # Set distro repo variable if [ "${DISTRO_MAJOR_VERSION}" -gt 2015 ]; then DISTRO_REPO="openSUSE_Tumbleweed" + elif [ "${DISTRO_MAJOR_VERSION}" -eq 15 ] && [ "${DISTRO_MINOR_VERSION}" -ge 4 ]; then + DISTRO_REPO="${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" elif [ "${DISTRO_MAJOR_VERSION}" -ge 42 ] || [ "${DISTRO_MAJOR_VERSION}" -eq 15 ]; then DISTRO_REPO="openSUSE_Leap_${DISTRO_MAJOR_VERSION}.${DISTRO_MINOR_VERSION}" else 
DISTRO_REPO="SLE_${DISTRO_MAJOR_VERSION}_SP${SUSE_PATCHLEVEL}" fi - if [ "$_DOWNSTREAM_PKG_REPO" -eq $BS_TRUE ]; then - suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" - suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" - else - suse_pkg_url_base="${HTTP_VAL}://repo.saltproject.io/opensuse" - suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack:products.repo" - fi + suse_pkg_url_base="https://download.opensuse.org/repositories/systemsmanagement:/saltstack" + suse_pkg_url_path="${DISTRO_REPO}/systemsmanagement:saltstack.repo" SUSE_PKG_URL="$suse_pkg_url_base/$suse_pkg_url_path" } @@ -6353,7 +7804,7 @@ __version_lte() { zypper --non-interactive install --auto-agree-with-licenses python || return 1 fi - if [ "$(python -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print V1<=V2' "$1" "$2")" = "True" ]; then + if [ "$(${_PY_EXE} -c 'import sys; V1=tuple([int(i) for i in sys.argv[1].split(".")]); V2=tuple([int(i) for i in sys.argv[2].split(".")]); print(V1<=V2)' "$1" "$2")" = "True" ]; then __ZYPPER_REQUIRES_REPLACE_FILES=${BS_TRUE} else __ZYPPER_REQUIRES_REPLACE_FILES=${BS_FALSE} @@ -6470,7 +7921,7 @@ install_opensuse_git_deps() { fi # Check for Tumbleweed elif [ "${DISTRO_MAJOR_VERSION}" -ge 20210101 ]; then - __PACKAGES="python3-pip" + __PACKAGES="python3-pip gcc-c++ python3-pyzmq-devel" else __PACKAGES="python-pip python-setuptools gcc" fi @@ -6487,6 +7938,10 @@ install_opensuse_git_deps() { return 0 } +install_opensuse_onedir_deps() { + install_opensuse_stable_deps || return 1 +} + install_opensuse_stable() { __PACKAGES="" @@ -6519,6 +7974,10 @@ install_opensuse_git() { return 0 } +install_opensuse_onedir() { + install_opensuse_stable || return 1 +} + install_opensuse_stable_post() { for fname in api master minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -6563,10 
+8022,17 @@ install_opensuse_git_post() { use_usr_lib=$BS_TRUE fi - if [ "${use_usr_lib}" -eq $BS_TRUE ]; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" else - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/" + fi + + if [ "${use_usr_lib}" -eq $BS_TRUE ]; then + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/usr/lib/systemd/system/salt-${fname}.service" + else + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" fi continue @@ -6581,6 +8047,10 @@ install_opensuse_git_post() { return 0 } +install_opensuse_onedir_post() { + install_opensuse_stable_post || return 1 +} + install_opensuse_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -6740,6 +8210,11 @@ install_opensuse_15_git() { return 0 } +install_opensuse_15_onedir_deps() { + __opensuse_prep_install || return 1 + return 0 +} + # # End of openSUSE Leap 15 # @@ -6769,6 +8244,13 @@ install_suse_15_git_deps() { return 0 } +install_suse_15_onedir_deps() { + __opensuse_prep_install || return 1 + install_opensuse_15_onedir_deps || return 1 + + return 0 +} + install_suse_15_stable() { install_opensuse_stable || return 1 return 0 @@ -6779,6 +8261,11 @@ install_suse_15_git() { return 0 } +install_suse_15_onedir() { + install_opensuse_stable || return 1 + return 0 +} + install_suse_15_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -6789,6 +8276,11 @@ install_suse_15_git_post() { return 0 } +install_suse_15_onedir_post() { + install_opensuse_stable_post || return 1 + return 0 +} + install_suse_15_restart_daemons() { install_opensuse_restart_daemons || return 1 return 
0 @@ -6871,6 +8363,11 @@ install_suse_12_git_deps() { return 0 } +install_suse_12_onedir_deps() { + install_suse_12_stable_deps || return 1 + return 0 +} + install_suse_12_stable() { install_opensuse_stable || return 1 return 0 @@ -6881,6 +8378,11 @@ install_suse_12_git() { return 0 } +install_suse_12_onedir() { + install_opensuse_stable || return 1 + return 0 +} + install_suse_12_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -6891,6 +8393,11 @@ install_suse_12_git_post() { return 0 } +install_suse_12_onedir_post() { + install_opensuse_stable_post || return 1 + return 0 +} + install_suse_12_restart_daemons() { install_opensuse_restart_daemons || return 1 return 0 @@ -6967,6 +8474,11 @@ install_suse_11_git_deps() { return 0 } +install_suse_11_onedir_deps() { + install_suse_11_stable_deps || return 1 + return 0 +} + install_suse_11_stable() { install_opensuse_stable || return 1 return 0 @@ -6977,6 +8489,11 @@ install_suse_11_git() { return 0 } +install_suse_11_onedir() { + install_opensuse_stable || return 1 + return 0 +} + install_suse_11_stable_post() { install_opensuse_stable_post || return 1 return 0 @@ -6987,6 +8504,11 @@ install_suse_11_git_post() { return 0 } +install_suse_11_onedir_post() { + install_opensuse_stable_post || return 1 + return 0 +} + install_suse_11_restart_daemons() { install_opensuse_restart_daemons || return 1 return 0 @@ -7086,11 +8608,6 @@ __gentoo_pre_dep() { mkdir /etc/portage fi - # Enable Python 3.6 target for pre Neon Salt release - if echo "${STABLE_REV}" | grep -q "2019" || [ "${ITYPE}" = "git" ] && [ "${_POST_NEON_INSTALL}" -eq $BS_FALSE ]; then - EXTRA_PYTHON_TARGET=python3_6 - fi - # Enable Python 3.7 target for Salt Neon using GIT if [ "${ITYPE}" = "git" ] && [ "${GIT_REV}" = "v3000" ]; then EXTRA_PYTHON_TARGET=python3_7 @@ -7186,6 +8703,9 @@ install_gentoo_git_deps() { __emerge ${GENTOO_GIT_PACKAGES} || return 1 fi + echoinfo "Running emerge -v1 setuptools" + __emerge -v1 setuptools || return 1 + 
__git_clone_and_checkout || return 1 __gentoo_post_dep || return 1 } @@ -7233,6 +8753,11 @@ install_gentoo_git() { return 0 } +install_gentoo_onedir() { + STABLE_REV=${ONEDIR_REV} + install_gentoo_stable || return 1 +} + install_gentoo_post() { for fname in api master minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -7268,8 +8793,15 @@ install_gentoo_git_post() { [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + # Account for new path for services files in later releases + if [ -f "${_SALT_GIT_CHECKOUT_DIR}/pkg/common/salt-${fname}.service" ]; then + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg/common" + else + _SERVICE_DIR="${_SALT_GIT_CHECKOUT_DIR}/pkg" + fi + if __check_command_exists systemctl ; then - __copyfile "${_SALT_GIT_CHECKOUT_DIR}/pkg/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" + __copyfile "${_SERVICE_DIR}/salt-${fname}.service" "/lib/systemd/system/salt-${fname}.service" # Skip salt-api since the service should be opt-in and not necessarily started on boot [ $fname = "api" ] && continue @@ -7315,6 +8847,10 @@ _eof return 0 } +install_gentoo_onedir_post() { + install_gentoo_post || return 1 +} + install_gentoo_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return @@ -7466,7 +9002,46 @@ __macosx_get_packagesite() { fi PKG="salt-${STABLE_REV}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" - SALTPKGCONFURL="https://repo.saltproject.io/osx/${PKG}" + SALTPKGCONFURL="https://${_REPO_URL}/osx/${PKG}" +} + +__parse_repo_json_python() { + + # Using latest, grab the right + # version from the repo.json + _JSON_VERSION=$(python - <<-EOF +import json, urllib.request +url = "https://repo.saltproject.io/salt/py3/macos/repo.json" +response = urllib.request.urlopen(url) +data = json.loads(response.read()) +version = data["${_ONEDIR_REV}"][list(data["${_ONEDIR_REV}"])[0]]['version'] 
+print(version) +EOF +) +echo "${_JSON_VERSION}" +} + +__macosx_get_packagesite_onedir() { + DARWIN_ARCH="x86_64" + + __PY_VERSION_REPO="py2" + if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -eq 3 ]; then + __PY_VERSION_REPO="py3" + fi + + if [ "$(echo "$_ONEDIR_REV" | grep -E '^(latest)$')" != "" ]; then + _PKG_VERSION=$(__parse_repo_json_python) + elif [ "$(echo "$_ONEDIR_REV" | grep -E '^([3-9][0-9]{3}(\.[0-9]*))')" != "" ]; then + _PKG_VERSION=$_ONEDIR_REV + else + _PKG_VERSION=$(__parse_repo_json_python) + fi + if [ "$(echo "$_ONEDIR_REV" | grep -E '^(3005)')" != "" ]; then + PKG="salt-${_PKG_VERSION}-macos-${DARWIN_ARCH}.pkg" + else + PKG="salt-${_PKG_VERSION}-${__PY_VERSION_REPO}-${DARWIN_ARCH}.pkg" + fi + SALTPKGCONFURL="https://${_REPO_URL}/${_ONEDIR_DIR}/${__PY_VERSION_REPO}/macos/${ONEDIR_REV}/${PKG}" } # Using a separate conf step to head for idempotent install... @@ -7475,11 +9050,21 @@ __configure_macosx_pkg_details() { return 0 } +__configure_macosx_pkg_details_onedir() { + __macosx_get_packagesite_onedir || return 1 + return 0 +} + install_macosx_stable_deps() { __configure_macosx_pkg_details || return 1 return 0 } +install_macosx_onedir_deps() { + __configure_macosx_pkg_details_onedir || return 1 + return 0 +} + install_macosx_git_deps() { install_macosx_stable_deps || return 1 @@ -7526,6 +9111,16 @@ install_macosx_stable() { return 0 } +install_macosx_onedir() { + install_macosx_onedir_deps || return 1 + + __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1 + + /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1 + + return 0 +} + install_macosx_git() { if [ -n "$_PY_EXE" ]; then @@ -7563,6 +9158,11 @@ install_macosx_stable_post() { return 0 } +install_macosx_onedir_post() { + install_macosx_stable_post || return 1 + return 0 +} + install_macosx_git_post() { install_macosx_stable_post || return 1 return 0 @@ -7571,8 +9171,15 @@ install_macosx_git_post() { install_macosx_restart_daemons() { [ $_START_DAEMONS -eq $BS_FALSE ] && return 
- /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 - /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + if [ "$_INSTALL_MINION" -eq $BS_TRUE ]; then + /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.minion.plist || return 1 + fi + + if [ "$_INSTALL_MASTER" -eq $BS_TRUE ]; then + /bin/launchctl unload -w /Library/LaunchDaemons/com.saltstack.salt.master.plist || return 1 + /bin/launchctl load -w /Library/LaunchDaemons/com.saltstack.salt.master.plist || return 1 + fi return 0 } @@ -7774,6 +9381,43 @@ preseed_master() { # ####################################################################################################################### +####################################################################################################################### +# +# This function checks if all of the installed daemons are running or not. 
+# +daemons_running_onedir() { + [ "$_START_DAEMONS" -eq $BS_FALSE ] && return 0 + + FAILED_DAEMONS=0 + for fname in api master minion syndic; do + # Skip salt-api since the service should be opt-in and not necessarily started on boot + [ $fname = "api" ] && continue + + # Skip if not meant to be installed + [ $fname = "minion" ] && [ "$_INSTALL_MINION" -eq $BS_FALSE ] && continue + [ $fname = "master" ] && [ "$_INSTALL_MASTER" -eq $BS_FALSE ] && continue + [ $fname = "syndic" ] && [ "$_INSTALL_SYNDIC" -eq $BS_FALSE ] && continue + + if [ -f "/opt/saltstack/salt/run/run" ]; then + salt_path="/opt/saltstack/salt/run/run ${fname}" + else + salt_path="salt-${fname}" + fi + process_running=$(pgrep -f "${salt_path}") + if [ "${process_running}" = "" ]; then + echoerror "${salt_path} was not found running" + FAILED_DAEMONS=$((FAILED_DAEMONS + 1)) + fi + done + + return $FAILED_DAEMONS +} + +# +# Ended daemons running check function +# +####################################################################################################################### + ####################################################################################################################### # # This function checks if all of the installed daemons are running or not. 
@@ -7874,6 +9518,7 @@ echodebug "PRESEED_MASTER_FUNC=${PRESEED_MASTER_FUNC}" INSTALL_FUNC_NAMES="install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}_${ITYPE}" INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}_${ITYPE}" INSTALL_FUNC_NAMES="$INSTALL_FUNC_NAMES install_${DISTRO_NAME_L}_${ITYPE}" +echodebug "INSTALL_FUNC_NAMES=${INSTALL_FUNC_NAMES}" INSTALL_FUNC="null" for FUNC_NAME in $(__strip_duplicates "$INSTALL_FUNC_NAMES"); do @@ -7925,6 +9570,7 @@ DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}${PREFIXED_DISTRO_MAJOR_VERSION}${PREFIXED_DISTRO_MINOR_VERSION}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}_${ITYPE}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${DISTRO_NAME_L}" +DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running_${ITYPE}" DAEMONS_RUNNING_FUNC_NAMES="$DAEMONS_RUNNING_FUNC_NAMES daemons_running" DAEMONS_RUNNING_FUNC="null" @@ -8114,6 +9760,11 @@ if [ "$DAEMONS_RUNNING_FUNC" != "null" ] && [ ${_START_DAEMONS} -eq $BS_TRUE ]; fi fi +if [ "$_AUTO_ACCEPT_MINION_KEYS" -eq "$BS_TRUE" ]; then + echoinfo "Accepting the Salt Minion Keys" + salt-key -yA +fi + # Done! if [ "$_CONFIG_ONLY" -eq $BS_FALSE ]; then echoinfo "Salt installed!" @@ -8121,6 +9772,13 @@ else echoinfo "Salt configured!" fi +if [ "$_QUICK_START" -eq "$BS_TRUE" ]; then + echoinfo "Congratulations!" 
+ echoinfo "A couple of commands to try:" + echoinfo " salt \* test.ping" + echoinfo " salt \* test.version" +fi + exit 0 # vim: set sts=4 ts=4 et diff --git a/setup/so-functions b/setup/so-functions index 1c9b0f43d..b64daaa92 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -1258,7 +1258,7 @@ generate_ssl() { # if the install type is a manager then we need to wait for the minion to be ready before trying # to run the ssl state since we need the minion to sign the certs if [[ "$install_type" =~ ^(EVAL|MANAGER|MANAGERSEARCH|STANDALONE|IMPORT|HELIXSENSOR)$ ]]; then - wait_for_salt_minion + wait_for_salt_minion "$MINION_ID" "5" "$setup_log" || fail_setup fi info "Applying SSL state" logCmd "salt-call state.apply ssl -l info" @@ -1972,6 +1972,7 @@ securityonion_repo() { } repo_sync_local() { + SALTVERSION=$(egrep 'version: [0-9]{4}' ../salt/salt/master.defaults.yaml | sed 's/^.*version: //') info "Repo Sync" if [[ $is_supported ]]; then # Sync the repo from the the SO repo locally. 
@@ -2021,7 +2022,7 @@ repo_sync_local() { curl -fsSL https://repo.securityonion.net/file/so-repo/prod/2.4/so/so.repo | tee /etc/yum.repos.d/so.repo rpm --import https://repo.saltproject.io/salt/py3/redhat/9/x86_64/SALT-PROJECT-GPG-PUBKEY-2023.pub dnf config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo - curl -fsSL https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/3006.1.repo | tee /etc/yum.repos.d/salt.repo + curl -fsSL "https://repo.saltproject.io/salt/py3/redhat/9/x86_64/minor/$SALTVERSION.repo" | tee /etc/yum.repos.d/salt.repo dnf repolist curl --retry 5 --retry-delay 60 -A "netinstall/$SOVERSION/$OS/$(uname -r)/1" https://sigs.securityonion.net/checkup --output /tmp/install else From d42b5ef901c0aef7ea8a9c2b175bca5a541e3232 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 27 Oct 2023 11:18:56 -0400 Subject: [PATCH 298/417] remove unused url props to avoid kratos complaining about invalid urls when they're blank --- salt/kratos/map.jinja | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/salt/kratos/map.jinja b/salt/kratos/map.jinja index a2477098d..a603d813a 100644 --- a/salt/kratos/map.jinja +++ b/salt/kratos/map.jinja @@ -23,4 +23,16 @@ {% if KRATOSMERGED.oidc.enabled and 'oidc' in salt['pillar.get']('features') %} {% do KRATOSMERGED.config.selfservice.methods.update({'oidc': {'enabled': true, 'config': {'providers': [KRATOSMERGED.oidc.config]}}}) %} -{% endif %} \ No newline at end of file +{% endif %} + +{% if KRATOSMERGED.oidc.config.auth_url is defined and not KRATOSMERGED.oidc.config.auth_url.strip() | length %} +{% do KRATOSMERGED.oidc.config.pop('auth_url') %} +{% endif %} + +{% if KRATOSMERGED.oidc.config.issuer_url is defined and not KRATOSMERGED.oidc.config.issuer_url.strip() | length %} +{% do KRATOSMERGED.oidc.config.pop('issuer_url') %} +{% endif %} + +{% if KRATOSMERGED.oidc.config.token_url is defined and not KRATOSMERGED.oidc.config.token_url.strip() | length %} +{% do 
KRATOSMERGED.oidc.config.pop('token_url') %} +{% endif %} From 3a83c526608450c9b102a9340fb2a58f393d0727 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 27 Oct 2023 11:20:05 -0400 Subject: [PATCH 299/417] minor updates --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 7f2e97617..8ea99f559 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -2.4.0-kilo +2.4.30 From c955f9210a16f9ce2105620f9728a53a1d21942d Mon Sep 17 00:00:00 2001 From: weslambert Date: Fri, 27 Oct 2023 17:24:27 -0400 Subject: [PATCH 300/417] Remove policy for Cases indices --- salt/elasticsearch/defaults.yaml | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 44cb0ea7d..2e19c50b7 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -108,8 +108,6 @@ elasticsearch: match_mapping_type: string settings: index: - lifecycle: - name: so-case-logs mapping: total_fields: limit: 1500 @@ -119,30 +117,6 @@ elasticsearch: sort: field: '@timestamp' order: desc - policy: - phases: - cold: - actions: - set_priority: - priority: 0 - min_age: 30d - delete: - actions: - delete: {} - min_age: 365d - hot: - actions: - rollover: - max_age: 30d - max_primary_shard_size: 50gb - set_priority: - priority: 100 - min_age: 0ms - warm: - actions: - set_priority: - priority: 50 - min_age: 30d so-common: close: 30 delete: 365 From 76dd6f07abbe45b08823a615e1a277e5368f5bf8 Mon Sep 17 00:00:00 2001 From: weslambert Date: Fri, 27 Oct 2023 17:26:33 -0400 Subject: [PATCH 301/417] Remove policy for OSQuery manager indices --- salt/elasticsearch/defaults.yaml | 52 -------------------------------- 1 file changed, 52 deletions(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 2e19c50b7..2781d2144 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -6323,33 +6323,7 @@ elasticsearch: 
template: settings: index: - lifecycle: - name: so-logs-osquery-manager-action.responses-logs number_of_replicas: 0 - policy: - phases: - cold: - actions: - set_priority: - priority: 0 - min_age: 30d - delete: - actions: - delete: {} - min_age: 365d - hot: - actions: - rollover: - max_age: 30d - max_primary_shard_size: 50gb - set_priority: - priority: 100 - min_age: 0ms - warm: - actions: - set_priority: - priority: 50 - min_age: 30d so-logs-osquery-manager-actions: index_sorting: false index_template: @@ -6366,33 +6340,7 @@ elasticsearch: template: settings: index: - lifecycle: - name: so-logs-osquery-manager-actions-logs number_of_replicas: 0 - policy: - phases: - cold: - actions: - set_priority: - priority: 0 - min_age: 30d - delete: - actions: - delete: {} - min_age: 365d - hot: - actions: - rollover: - max_age: 30d - max_primary_shard_size: 50gb - set_priority: - priority: 100 - min_age: 0ms - warm: - actions: - set_priority: - priority: 50 - min_age: 30d so-logs-panw_x_panos: index_sorting: false index_template: From 07e51121ba5d2649a75ee12087ece09c7c7e2cb3 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Mon, 30 Oct 2023 16:11:36 -0400 Subject: [PATCH 302/417] ensure networkminer is latest version --- salt/desktop/packages.sls | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/salt/desktop/packages.sls b/salt/desktop/packages.sls index 3817f2e80..841d34744 100644 --- a/salt/desktop/packages.sls +++ b/salt/desktop/packages.sls @@ -346,7 +346,6 @@ desktop_packages: - snappy - sound-theme-freedesktop - soundtouch - - securityonion-networkminer - speech-dispatcher - speech-dispatcher-espeak-ng - speex @@ -433,6 +432,10 @@ desktop_packages: - xorg-x11-xinit-session - zip +install_networkminer: + pkg.latest: + - name: securityonion-networkminer + {% else %} desktop_packages_os_fail: From ed6473a34b7da12fa6cc1ac13a77dfcc07c39dc9 Mon Sep 17 00:00:00 2001 From: weslambert Date: Mon, 30 Oct 2023 20:41:49 -0400 Subject: [PATCH 303/417] Add roles for 
eval mode --- salt/elasticsearch/defaults.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 2781d2144..dc8f97e44 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -9088,7 +9088,13 @@ elasticsearch: so-eval: config: node: - roles: [] + roles: + - master + - data + - data_hot + - ingest + - transform + - remote_cluster_client so-heavynode: config: node: From c354924b6853a88050f58bd9babddbbfa7ef2b7d Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 31 Oct 2023 10:05:29 -0400 Subject: [PATCH 304/417] Add import roles --- salt/elasticsearch/defaults.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index dc8f97e44..cd8ff9397 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -9107,7 +9107,13 @@ elasticsearch: so-import: config: node: - roles: [] + roles: + - master + - data + - data_hot + - ingest + - transform + - remote_cluster_client so-manager: config: node: From c420e198fb99dcc9a1faf14bb7d69380d517d180 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Tue, 31 Oct 2023 11:18:39 -0400 Subject: [PATCH 305/417] ignore specific Suricata errors --- salt/common/tools/sbin/so-log-check | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 395f60c7d..10f7e8c89 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -158,6 +158,8 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|repodownload.conf" # known issue with reposync on pre-2.4.20 EXCLUDED_ERRORS="$EXCLUDED_ERRORS|missing versions record" # stenographer corrupt index EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." 
# known ingest type collisions issue with earlier versions of SO + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From 0c98bd96c7f731c20ed0b5c356fb9f2d2ed32e30 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 31 Oct 2023 12:52:00 -0400 Subject: [PATCH 306/417] Delete salt/idstools/tools/sbin/so-rule UI does this now --- salt/idstools/tools/sbin/so-rule | 454 ------------------------------- 1 file changed, 454 deletions(-) delete mode 100755 salt/idstools/tools/sbin/so-rule diff --git a/salt/idstools/tools/sbin/so-rule b/salt/idstools/tools/sbin/so-rule deleted file mode 100755 index 19618c9f5..000000000 --- a/salt/idstools/tools/sbin/so-rule +++ /dev/null @@ -1,454 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -""" -Local exit codes: - - General error: 1 - - Invalid argument: 2 - - File error: 3 -""" - -import sys, os, subprocess, argparse, signal -import copy -import re -import textwrap -import yaml - -minion_pillar_dir = '/opt/so/saltstack/local/pillar/minions' -salt_proc: subprocess.CompletedProcess = None - - -def print_err(string: str): - print(string, file=sys.stderr) - - -def check_apply(args: dict, prompt: bool = True): - if args.apply: - print('Configuration updated. Applying changes:') - return apply() - else: - if prompt: - message = 'Configuration updated. Would you like to apply your changes now? 
(y/N) ' - answer = input(message) - while answer.lower() not in [ 'y', 'n', '' ]: - answer = input(message) - if answer.lower() in [ 'n', '' ]: - return 0 - else: - print('Applying changes:') - return apply() - else: - return 0 - - -def apply(): - salt_cmd = ['salt-call', 'state.apply', '-l', 'quiet', 'idstools.sync_files', 'queue=True'] - update_cmd = ['so-rule-update'] - print('Syncing config files...') - cmd = subprocess.run(salt_cmd, stdout=subprocess.DEVNULL) - if cmd.returncode == 0: - print('Updating rules...') - return subprocess.run(update_cmd).returncode - else: - return cmd.returncode - - -def find_minion_pillar() -> str: - regex = '^.*_(manager|managersearch|standalone|import|eval)\.sls$' - - result = [] - for root, _, files in os.walk(minion_pillar_dir): - for f_minion_id in files: - if re.search(regex, f_minion_id): - result.append(os.path.join(root, f_minion_id)) - - if len(result) == 0: - print_err('Could not find manager-type pillar (eval, standalone, manager, managersearch, import). 
Are you running this script on the manager?') - sys.exit(3) - elif len(result) > 1: - res_arr = [] - for r in result: - res_arr.append(f'\"{r}\"') - res_str = ', '.join(res_arr) - print_err('(This should not happen, the system is in an error state if you see this message.)\n') - print_err('More than one manager-type pillar exists, minion id\'s listed below:') - print_err(f' {res_str}') - sys.exit(3) - else: - return result[0] - - -def read_pillar(pillar: str): - try: - with open(pillar, 'r') as f: - loaded_yaml = yaml.safe_load(f.read()) - if loaded_yaml is None: - print_err(f'Could not parse {pillar}') - sys.exit(3) - return loaded_yaml - except: - print_err(f'Could not open {pillar}') - sys.exit(3) - - -def write_pillar(pillar: str, content: dict): - try: - sids = content['idstools']['sids'] - if sids['disabled'] is not None: - if len(sids['disabled']) == 0: sids['disabled'] = None - if sids['enabled'] is not None: - if len(sids['enabled']) == 0: sids['enabled'] = None - if sids['modify'] is not None: - if len(sids['modify']) == 0: sids['modify'] = None - - with open(pillar, 'w') as f: - return yaml.dump(content, f, default_flow_style=False) - except Exception as e: - print_err(f'Could not open {pillar}') - sys.exit(3) - - -def check_sid_pattern(sid_pattern: str): - message = f'SID {sid_pattern} is not valid, did you forget the \"re:\" prefix for a regex pattern?' 
- - if sid_pattern.startswith('re:'): - r_string = sid_pattern[3:] - if not valid_regex(r_string): - print_err('Invalid regex pattern.') - return False - else: - return True - else: - sid: int - try: - sid = int(sid_pattern) - except: - print_err(message) - return False - - if sid >= 0: - return True - else: - print_err(message) - return False - - -def valid_regex(pattern: str): - try: - re.compile(pattern) - return True - except re.error: - return False - - -def sids_key_exists(pillar: dict, key: str): - return key in pillar.get('idstools', {}).get('sids', {}) - - -def rem_from_sids(pillar: dict, key: str, val: str, optional = False): - pillar_dict = copy.deepcopy(pillar) - arr = pillar_dict['idstools']['sids'][key] - if arr is None or val not in arr: - if not optional: print(f'{val} already does not exist in {key}') - else: - pillar_dict['idstools']['sids'][key].remove(val) - return pillar_dict - - -def add_to_sids(pillar: dict, key: str, val: str, optional = False): - pillar_dict = copy.deepcopy(pillar) - if pillar_dict['idstools']['sids'][key] is None: - pillar_dict['idstools']['sids'][key] = [] - if val in pillar_dict['idstools']['sids'][key]: - if not optional: print(f'{val} already exists in {key}') - else: - pillar_dict['idstools']['sids'][key].append(val) - return pillar_dict - - -def add_rem_disabled(args: dict): - global salt_proc - - if not check_sid_pattern(args.sid_pattern): - return 2 - - pillar_dict = read_pillar(args.pillar) - - if not sids_key_exists(pillar_dict, 'disabled'): - pillar_dict['idstools']['sids']['disabled'] = None - - if args.remove: - temp_pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern) - else: - temp_pillar_dict = add_to_sids(pillar_dict, 'disabled', args.sid_pattern) - - if temp_pillar_dict['idstools']['sids']['disabled'] == pillar_dict['idstools']['sids']['disabled']: - salt_proc = check_apply(args, prompt=False) - return salt_proc - else: - pillar_dict = temp_pillar_dict - - if not args.remove: - if 
sids_key_exists(pillar_dict, 'enabled'): - pillar_dict = rem_from_sids(pillar_dict, 'enabled', args.sid_pattern, optional=True) - - modify = pillar_dict.get('idstools', {}).get('sids', {}).get('modify') - if modify is not None: - rem_candidates = [] - for action in modify: - if action.startswith(f'{args.sid_pattern} '): - rem_candidates.append(action) - if len(rem_candidates) > 0: - for item in rem_candidates: - print(f' - {item}') - answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ') - while answer.lower() not in [ 'y', 'n', '' ]: - for item in rem_candidates: - print(f' - {item}') - answer = input(f'The above modify actions contain {args.sid_pattern}. Would you like to remove them? (Y/n) ') - if answer.lower() in [ 'y', '' ]: - for item in rem_candidates: - modify.remove(item) - pillar_dict['idstools']['sids']['modify'] = modify - - write_pillar(pillar=args.pillar, content=pillar_dict) - - salt_proc = check_apply(args) - return salt_proc - - -def list_disabled_rules(args: dict): - pillar_dict = read_pillar(args.pillar) - - disabled = pillar_dict.get('idstools', {}).get('sids', {}).get('disabled') - if disabled is None: - print('No rules disabled.') - return 0 - else: - print('Disabled rules:') - for rule in disabled: - print(f' - {rule}') - return 0 - - -def add_rem_enabled(args: dict): - global salt_proc - - if not check_sid_pattern(args.sid_pattern): - return 2 - - pillar_dict = read_pillar(args.pillar) - - if not sids_key_exists(pillar_dict, 'enabled'): - pillar_dict['idstools']['sids']['enabled'] = None - - if args.remove: - temp_pillar_dict = rem_from_sids(pillar_dict, 'enabled', args.sid_pattern) - else: - temp_pillar_dict = add_to_sids(pillar_dict, 'enabled', args.sid_pattern) - - if temp_pillar_dict['idstools']['sids']['enabled'] == pillar_dict['idstools']['sids']['enabled']: - salt_proc = check_apply(args, prompt=False) - return salt_proc - else: - pillar_dict = temp_pillar_dict - - if not 
args.remove: - if sids_key_exists(pillar_dict, 'disabled'): - pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True) - - write_pillar(pillar=args.pillar, content=pillar_dict) - - salt_proc = check_apply(args) - return salt_proc - - -def list_enabled_rules(args: dict): - pillar_dict = read_pillar(args.pillar) - - enabled = pillar_dict.get('idstools', {}).get('sids', {}).get('enabled') - if enabled is None: - print('No rules explicitly enabled.') - return 0 - else: - print('Enabled rules:') - for rule in enabled: - print(f' - {rule}') - return 0 - - -def add_rem_modify(args: dict): - global salt_proc - - if not check_sid_pattern(args.sid_pattern): - return 2 - - if not valid_regex(args.search_term): - print_err('Search term is not a valid regex pattern.') - - string_val = f'{args.sid_pattern} \"{args.search_term}\" \"{args.replace_term}\"' - - pillar_dict = read_pillar(args.pillar) - - if not sids_key_exists(pillar_dict, 'modify'): - pillar_dict['idstools']['sids']['modify'] = None - - if args.remove: - temp_pillar_dict = rem_from_sids(pillar_dict, 'modify', string_val) - else: - temp_pillar_dict = add_to_sids(pillar_dict, 'modify', string_val) - - if temp_pillar_dict['idstools']['sids']['modify'] == pillar_dict['idstools']['sids']['modify']: - salt_proc = check_apply(args, prompt=False) - return salt_proc - else: - pillar_dict = temp_pillar_dict - - # TODO: Determine if a rule should be removed from disabled if modified. 
- if not args.remove: - if sids_key_exists(pillar_dict, 'disabled'): - pillar_dict = rem_from_sids(pillar_dict, 'disabled', args.sid_pattern, optional=True) - - write_pillar(pillar=args.pillar, content=pillar_dict) - - salt_proc = check_apply(args) - return salt_proc - - -def list_modified_rules(args: dict): - pillar_dict = read_pillar(args.pillar) - - modify = pillar_dict.get('idstools', {}).get('sids', {}).get('modify') - if modify is None: - print('No rules currently modified.') - return 0 - else: - print('Modified rules + modifications:') - for rule in modify: - print(f' - {rule}') - return 0 - - -def sigint_handler(*_): - print('Exiting gracefully on Ctrl-C') - if salt_proc is not None: salt_proc.send_signal(signal.SIGINT) - sys.exit(0) - - -def main(): - signal.signal(signal.SIGINT, sigint_handler) - - if os.geteuid() != 0: - print_err('You must run this script as root') - sys.exit(1) - - apply_help='After updating rule configuration, apply the idstools state.' - - main_parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter) - - subcommand_desc = textwrap.dedent( - """\ - disabled Manage and list disabled rules (add, remove, list) - enabled Manage and list enabled rules (add, remove, list) - modify Manage and list modified rules (add, remove, list) - """ - ) - subparsers = main_parser.add_subparsers(title='commands', description=subcommand_desc, metavar='', dest='command') - - - sid_or_regex_help = 'A valid SID (ex: "4321") or regular expression pattern (ex: "re:heartbleed|spectre")' - - # Disabled actions - disabled = subparsers.add_parser('disabled') - disabled_sub = disabled.add_subparsers() - - disabled_add = disabled_sub.add_parser('add') - disabled_add.set_defaults(func=add_rem_disabled) - disabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help) - disabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help) - - disabled_rem = 
disabled_sub.add_parser('remove') - disabled_rem.set_defaults(func=add_rem_disabled, remove=True) - disabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help) - disabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help) - - disabled_list = disabled_sub.add_parser('list') - disabled_list.set_defaults(func=list_disabled_rules) - - - # Enabled actions - enabled = subparsers.add_parser('enabled') - enabled_sub = enabled.add_subparsers() - - enabled_add = enabled_sub.add_parser('add') - enabled_add.set_defaults(func=add_rem_enabled) - enabled_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help) - enabled_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help) - - enabled_rem = enabled_sub.add_parser('remove') - enabled_rem.set_defaults(func=add_rem_enabled, remove=True) - enabled_rem.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help) - enabled_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help) - - enabled_list = enabled_sub.add_parser('list') - enabled_list.set_defaults(func=list_enabled_rules) - - - search_term_help='A properly escaped regex search term (ex: "\\\$EXTERNAL_NET")' - replace_term_help='The text to replace the search term with' - - # Modify actions - modify = subparsers.add_parser('modify') - modify_sub = modify.add_subparsers() - - modify_add = modify_sub.add_parser('add') - modify_add.set_defaults(func=add_rem_modify) - modify_add.add_argument('sid_pattern', metavar='SID|REGEX', help=sid_or_regex_help) - modify_add.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help) - modify_add.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help) - modify_add.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help) - - modify_rem = modify_sub.add_parser('remove') - 
modify_rem.set_defaults(func=add_rem_modify, remove=True) - modify_rem.add_argument('sid_pattern', metavar='SID', help=sid_or_regex_help) - modify_rem.add_argument('search_term', metavar='SEARCH_TERM', help=search_term_help) - modify_rem.add_argument('replace_term', metavar='REPLACE_TERM', help=replace_term_help) - modify_rem.add_argument('--apply', action='store_const', const=True, required=False, help=apply_help) - - modify_list = modify_sub.add_parser('list') - modify_list.set_defaults(func=list_modified_rules) - - - # Begin parse + run - args = main_parser.parse_args(sys.argv[1:]) - - if not hasattr(args, 'remove'): - args.remove = False - - args.pillar = find_minion_pillar() - - if hasattr(args, 'func'): - exit_code = args.func(args) - else: - if args.command is None: - main_parser.print_help() - else: - if args.command == 'disabled': - disabled.print_help() - elif args.command == 'enabled': - enabled.print_help() - elif args.command == 'modify': - modify.print_help() - sys.exit(0) - - sys.exit(exit_code) - - -if __name__ == '__main__': - main() From cc3a69683c58d0e0618713a081dd5bcf66ae8fb7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 31 Oct 2023 12:55:47 -0400 Subject: [PATCH 307/417] Delete salt/manager/tools/sbin/so-allow-view --- salt/manager/tools/sbin/so-allow-view | 15 --------------- 1 file changed, 15 deletions(-) delete mode 100755 salt/manager/tools/sbin/so-allow-view diff --git a/salt/manager/tools/sbin/so-allow-view b/salt/manager/tools/sbin/so-allow-view deleted file mode 100755 index 58b972ee2..000000000 --- a/salt/manager/tools/sbin/so-allow-view +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash - -# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one -# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at -# https://securityonion.net/license; you may not use this file except in compliance with the -# Elastic License 2.0. - - - -. 
/usr/sbin/so-common - -echo "" -echo "Hosts/Networks that have access to login to the Security Onion Console:" - -so-firewall includedhosts analyst From 497294c363d721331e9f727d6d1eacbb20bdb202 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 31 Oct 2023 12:57:10 -0400 Subject: [PATCH 308/417] Delete salt/common/tools/sbin/so-zeek-logs --- salt/common/tools/sbin/so-zeek-logs | 67 ----------------------------- 1 file changed, 67 deletions(-) delete mode 100755 salt/common/tools/sbin/so-zeek-logs diff --git a/salt/common/tools/sbin/so-zeek-logs b/salt/common/tools/sbin/so-zeek-logs deleted file mode 100755 index f6df7f8aa..000000000 --- a/salt/common/tools/sbin/so-zeek-logs +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -local_salt_dir=/opt/so/saltstack/local - -zeek_logs_enabled() { - echo "zeeklogs:" > $local_salt_dir/pillar/zeeklogs.sls - echo " enabled:" >> $local_salt_dir/pillar/zeeklogs.sls - for BLOG in "${BLOGS[@]}"; do - echo " - $BLOG" | tr -d '"' >> $local_salt_dir/pillar/zeeklogs.sls - done -} - -whiptail_manager_adv_service_zeeklogs() { - BLOGS=$(whiptail --title "so-zeek-logs" --checklist "Please Select Logs to Send:" 24 78 12 \ - "conn" "Connection Logging" ON \ - "dce_rpc" "RPC Logs" ON \ - "dhcp" "DHCP Logs" ON \ - "dnp3" "DNP3 Logs" ON \ - "dns" "DNS Logs" ON \ - "dpd" "DPD Logs" ON \ - "files" "Files Logs" ON \ - "ftp" "FTP Logs" ON \ - "http" "HTTP Logs" ON \ - "intel" "Intel Hits Logs" ON \ - "irc" "IRC Chat Logs" ON \ - "kerberos" "Kerberos Logs" ON \ - "modbus" "MODBUS Logs" ON \ - "notice" "Zeek Notice Logs" ON \ - "ntlm" "NTLM Logs" ON \ - "pe" "PE Logs" ON \ - "radius" "Radius Logs" ON \ - "rfb" "RFB Logs" ON \ - "rdp" "RDP Logs" ON \ - "sip" "SIP Logs" ON \ - "smb_files" "SMB Files Logs" ON \ - "smb_mapping" "SMB Mapping Logs" ON \ - "smtp" "SMTP Logs" ON \ - "snmp" "SNMP Logs" ON \ - "ssh" "SSH Logs" ON \ - "ssl" "SSL Logs" ON \ - "syslog" "Syslog Logs" ON \ - "tunnel" "Tunnel Logs" ON \ - "weird" "Zeek Weird Logs" ON \ - "mysql" "MySQL 
Logs" ON \ - "socks" "SOCKS Logs" ON \ - "x509" "x.509 Logs" ON 3>&1 1>&2 2>&3 ) - - local exitstatus=$? - - IFS=' ' read -ra BLOGS <<< "$BLOGS" - - return $exitstatus -} - -whiptail_manager_adv_service_zeeklogs -return_code=$? -case $return_code in - 1) - whiptail --title "so-zeek-logs" --msgbox "Cancelling. No changes have been made." 8 75 - ;; - 255) - whiptail --title "so-zeek-logs" --msgbox "Whiptail error occured, exiting." 8 75 - ;; - *) - zeek_logs_enabled - ;; -esac - From d07cfdd3fe9366a6c22f6725027a05ec1be7af82 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Tue, 31 Oct 2023 13:10:55 -0400 Subject: [PATCH 309/417] Update so-functions --- setup/so-functions | 182 --------------------------------------------- 1 file changed, 182 deletions(-) diff --git a/setup/so-functions b/setup/so-functions index 42402ad86..56c4b29c1 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -268,15 +268,6 @@ collect_dockernet() { fi } -collect_es_space_limit() { - whiptail_log_size_limit "$log_size_limit" - - while ! valid_int "$log_size_limit"; do # Upper/lower bounds? - whiptail_invalid_input - whiptail_log_size_limit "$log_size_limit" - done -} - collect_gateway() { whiptail_management_interface_gateway @@ -286,28 +277,6 @@ collect_gateway() { done } -collect_homenet_mngr() { - whiptail_homenet_manager "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12" - - while ! valid_cidr_list "$HNMANAGER"; do - whiptail_invalid_input - whiptail_homenet_manager "$HNMANAGER" - done -} - -collect_homenet_snsr() { - if whiptail_homenet_sensor_inherit; then - export HNSENSOR=inherit - else - whiptail_homenet_sensor "10.0.0.0/8,192.168.0.0/16,172.16.0.0/12" - - while ! 
valid_cidr_list "$HNSENSOR"; do - whiptail_invalid_input - whiptail_homenet_sensor "$HNSENSOR" - done - fi -} - collect_hostname() { collect_hostname_validate @@ -346,26 +315,6 @@ collect_idh_preferences() { if [[ "$idh_preferences" != "" ]]; then IDH_MGTRESTRICT='True'; fi } -collect_idh_services() { - whiptail_idh_services - - case "$IDH_SERVICES" in - 'Linux Webserver (NAS Skin)') - IDH_SERVICES='"HTTP","FTP","SSH"' - ;; - 'MySQL Server') - IDH_SERVICES='"MYSQL","SSH"' - ;; - 'MSSQL Server') - IDH_SERVICES='"MSSQL","VNC' - ;; - 'Custom') - whiptail_idh_services_custom - IDH_SERVICES=$(echo "$IDH_SERVICES" | tr '[:blank:]' ',' ) - ;; - esac -} - collect_int_ip_mask() { whiptail_management_interface_ip_mask @@ -425,71 +374,6 @@ collect_net_method() { fi } -collect_ntp_servers() { - if whiptail_ntp_ask; then - [[ $is_airgap ]] && ntp_string="" - whiptail_ntp_servers "$ntp_string" - - while ! valid_ntp_list "$ntp_string"; do - whiptail_invalid_input - whiptail_ntp_servers "$ntp_string" - done - - IFS="," read -r -a ntp_servers <<< "$ntp_string" # Split string on commas into array - else - ntp_servers=() - fi -} - -collect_oinkcode() { - whiptail_oinkcode - - while ! valid_string "$OINKCODE" "" "128"; do - whiptail_invalid_input - whiptail_oinkcode "$OINKCODE" - done -} - -collect_patch_schedule() { - whiptail_patch_schedule - - case "$patch_schedule" in - 'New Schedule') - whiptail_patch_schedule_select_days - whiptail_patch_schedule_select_hours - collect_patch_schedule_name_new - patch_schedule_os_new - ;; - 'Import Schedule') - collect_patch_schedule_name_import - ;; - 'Automatic') - PATCHSCHEDULENAME='auto' - ;; - 'Manual') - PATCHSCHEDULENAME='manual' - ;; - esac -} - -collect_patch_schedule_name_new() { - whiptail_patch_name_new_schedule - - while ! 
valid_string "$PATCHSCHEDULENAME"; do - whiptail_invalid_string "schedule name" - whiptail_patch_name_new_schedule "$PATCHSCHEDULENAME" - done -} - -collect_patch_schedule_name_import() { - whiptail_patch_schedule_import - - while ! valid_string "$PATCHSCHEDULENAME"; do - whiptail_invalid_string "schedule name" - whiptail_patch_schedule_import "$PATCHSCHEDULENAME" - done -} - collect_proxy() { [[ -n $TESTING ]] && return local ask=${1:-true} @@ -658,47 +542,6 @@ configure_minion() { } >> "$setup_log" 2>&1 } -configure_ntp() { - local chrony_conf=/etc/chrony.conf - - # Install chrony if it isn't already installed - if ! command -v chronyc &> /dev/null; then - logCmd "dnf -y install chrony" - fi - - [[ -f $chrony_conf ]] && mv $chrony_conf "$chrony_conf.bak" - - printf '%s\n' "# NTP server list" > $chrony_conf - - # Build list of servers - for addr in "${ntp_servers[@]}"; do - echo "server $addr iburst" >> $chrony_conf - done - - printf '\n%s\n' "# Config options" >> $chrony_conf - - printf '%s\n' \ - 'driftfile /var/lib/chrony/drift' \ - 'makestep 1.0 3' \ - 'rtcsync' \ - 'logdir /var/log/chrony' >> $chrony_conf - - if [[ $is_rpm ]]; then - systemctl enable chronyd - systemctl restart chronyd - else - systemctl enable chrony - systemctl restart chrony - fi - - # Tell the chrony daemon to sync time & update the system time - # Since these commands only make a call to chronyd, wait after each command to make sure the changes are made - printf "Syncing chrony time to server: " - chronyc -a 'burst 4/4' && sleep 30 - printf "Forcing chrony to update the time: " - chronyc -a makestep && sleep 30 -} - checkin_at_boot() { local minion_config=/etc/salt/minion @@ -1055,16 +898,6 @@ download_elastic_agent_artifacts() { fi } -installer_progress_loop() { - local i=0 - local msg="${1:-Performing background actions...}" - while true; do - [[ $i -lt 98 ]] && ((i++)) - set_progress_str "$i" "$msg" nolog - [[ $i -gt 0 ]] && sleep 5s - done -} - installer_prereq_packages() { if [[ 
$is_deb ]]; then # Print message to stdout so the user knows setup is doing something @@ -1648,21 +1481,6 @@ network_setup() { logCmd "sed -i '/\$MNIC/${INTERFACE}/g' /etc/NetworkManager/dispatcher.d/pre-up.d/99-so-checksum-offload-disable" } -ntp_pillar_entries() { - - local pillar_file=$local_salt_dir/pillar/minions/$MINION_ID.sls - - - if [[ ${#ntp_servers[@]} -gt 0 ]]; then - printf '%s\n'\ - "ntp:"\ - " servers:" > "$pillar_file" - for addr in "${ntp_servers[@]}"; do - printf '%s\n' " - '$addr'" >> "$pillar_file" - done - fi -} - parse_install_username() { # parse out the install username so things copy correctly INSTALLUSERNAME=${SUDO_USER:-${USER}} From ae45d40eca779ebb06403b3ce5ffabc1572f32e9 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 13:34:30 +0000 Subject: [PATCH 310/417] Add Sublime Platform ingest pipeline --- salt/elasticsearch/files/ingest/sublime | 34 +++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 salt/elasticsearch/files/ingest/sublime diff --git a/salt/elasticsearch/files/ingest/sublime b/salt/elasticsearch/files/ingest/sublime new file mode 100644 index 000000000..c26f93c01 --- /dev/null +++ b/salt/elasticsearch/files/ingest/sublime @@ -0,0 +1,34 @@ +{ + "description" : " Email alerts from Sublime", + "processors" : [ + { "set": { "field": "event.module", "value": "sublime" } }, + { "set": { "field": "event.dataset", "value": "alert" } }, + { "set": { "field": "event.severity", "value": 3, "override": true } }, + { "set": { "field": "rule.name", "value": "Sublime Platform: {{ flagged_rules.0.name }}", "override": true } }, + { "set": { "field": "sublime.message_group_id", "value": "{{ _id }}", "override": true } }, + { "set": { "field": "email.address", "value": "{{ messages.0.recipients.0.email }}", "override": true } }, + { "set": { "field": "email.forwarded_recipents", "value": "{{ messages.0.forwarded_receipients }}", "override": true } }, + { "set": { "field": "email.sender.address", "value": "{{ 
messages.0.sender.email }}", "override": true } }, + { "set": { "field": "email.subject", "value": "{{ messages.0.subject }}", "override": true } }, + { "set": { "field": "email.forwarded_at", "value": "{{ messages.0.forwarded_at }}", "override": true } }, + { "set": { "field": "email.created_at", "value": "{{ messages.0.created_at }}", "override": true } }, + { "set": { "field": "email.read_at", "value": "{{ messages.0.read_at }}", "override": true } }, + { "set": { "field": "email.replied_at", "value": "{{ messages.0.replied_at }}", "override": true } }, + { + "grok": { + "field": "sublime.request_url", + "patterns": ["^https://api.%{DATA:sublime_host}/v0%{GREEDYDATA}$"], + "ignore_failure": true + } + }, + + { "rename": { "field": "sublime_host", "target_field": "sublime.url", "ignore_missing": true } }, + { "rename": { "field": "data", "target_field": "sublime", "ignore_missing": true } }, + { "rename": { "field": "flagged_rules", "target_field": "sublime.flagged_rules", "ignore_missing": true } }, + { "rename": { "field": "organization_id", "target_field": "sublime.organization_id", "ignore_missing": true } }, + { "rename": { "field": "review_status", "target_field": "sublime.review_status", "ignore_missing": true } }, + { "rename": { "field": "state", "target_field": "sublime.state", "ignore_missing": true } }, + { "rename": { "field": "user_reports", "target_field": "sublime.user_reports", "ignore_missing": true } }, + { "pipeline": { "name": "common" } } + ] +} From 4dc64400c58b9de09f6ac811180a55208dcea7a6 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 13:36:32 +0000 Subject: [PATCH 311/417] Support document_id --- .../so/9805_output_elastic_agent.conf.jinja | 23 +++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/salt/logstash/pipelines/config/so/9805_output_elastic_agent.conf.jinja b/salt/logstash/pipelines/config/so/9805_output_elastic_agent.conf.jinja index 0a148155c..f7671e2b7 100644 --- 
a/salt/logstash/pipelines/config/so/9805_output_elastic_agent.conf.jinja +++ b/salt/logstash/pipelines/config/so/9805_output_elastic_agent.conf.jinja @@ -1,13 +1,16 @@ output { if "elastic-agent" in [tags] { - if [metadata][pipeline] { + if [metadata][pipeline] { + if [metadata][_id] { elasticsearch { hosts => "{{ GLOBALS.manager }}" ecs_compatibility => v8 data_stream => true user => "{{ ES_USER }}" password => "{{ ES_PASS }}" + document_id => "%{[metadata][_id]}" pipeline => "%{[metadata][pipeline]}" + silence_errors_in_log => ["version_conflict_engine_exception"] ssl => true ssl_certificate_verification => false } @@ -19,10 +22,22 @@ output { data_stream => true user => "{{ ES_USER }}" password => "{{ ES_PASS }}" + pipeline => "%{[metadata][pipeline]}" ssl => true - ssl_certificate_verification => false + ssl_certificate_verification => false } - } + } + } + else { + elasticsearch { + hosts => "{{ GLOBALS.manager }}" + ecs_compatibility => v8 + data_stream => true + user => "{{ ES_USER }}" + password => "{{ ES_PASS }}" + ssl => true + ssl_certificate_verification => false + } + } } } - From 51247be6b97583e15dad1af1c93ff031ef0a51a9 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 13:37:52 +0000 Subject: [PATCH 312/417] Sublime Platform integration defaults --- salt/elasticfleet/defaults.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index a17957e7c..ba779f5a0 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -90,3 +90,10 @@ elasticfleet: - zscaler_zia - zscaler_zpa - 1password + optional_integrations: + sublime_platform: + enabled_nodes: [] + api_key: + base_url: https://api.platform.sublimesecurity.com + poll_interval: 5m + limit: 100 From 23ee9c2bb02842b7cd745e7b667bc02ad4ea6328 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 13:41:40 +0000 Subject: [PATCH 313/417] Sublime Platform integration --- .../sublime_platform.json | 44 
+++++++++++++++++++ salt/elasticfleet/soc_elasticfleet.yaml | 32 ++++++++++++++ 2 files changed, 76 insertions(+) create mode 100644 salt/elasticfleet/files/integrations-optional/sublime_platform.json diff --git a/salt/elasticfleet/files/integrations-optional/sublime_platform.json b/salt/elasticfleet/files/integrations-optional/sublime_platform.json new file mode 100644 index 000000000..8feedc879 --- /dev/null +++ b/salt/elasticfleet/files/integrations-optional/sublime_platform.json @@ -0,0 +1,44 @@ +{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED -%} +{%- from 'sensoroni/map.jinja' import SENSORONIMERGED -%} +{%- from 'vars/globals.map.jinja' import GLOBALS -%} +{%- raw -%} +{ + "package": { + "name": "httpjson", + "version": "" + }, + "name": "sublime-platform", + "namespace": "default", + "description": "", + "policy_id": "FleetServer_{%- endraw -%}{{ NAME }}{%- raw -%}", + "vars": {}, + "inputs": { + "generic-httpjson": { + "enabled": true, + "streams": { + "httpjson.generic": { + "enabled": true, + "vars": { + "request_method": "GET", + "processors": "- drop_event:\n when:\n not:\n contains: \n message: \"flagged_rules\"\n- decode_json_fields:\n fields: [\"message\"]\n document_id: id\n target: \"\"", + "enable_request_tracer": false, + "oauth_scopes": [], + "request_transforms": "- set:\n target: header.Authorization\n value: 'Bearer {% endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.api_key }}{%- raw -%}'\n- set:\n target: header.accept\n value: application/json\n- set:\n target: url.params.last_message_created_at[gte]\n value: '[[formatDate (now (parseDuration \"-{%- endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.poll_interval }}{%- raw -%}\")) \"2006-01-02T15:04:05Z\"]]'\n- set:\n target: url.params.reviewed\n value: false\n- set:\n target: url.params.flagged\n value: true\n- set:\n target: url.params.limit\n value: {% endraw %}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.limit 
}}{%- raw -%}", + "response_transforms": "", + "request_redirect_headers_ban_list": [], + "request_encode_as": "application/x-www-form-urlencoded", + "request_url": "{%- endraw -%}{{ ELASTICFLEETMERGED.optional_integrations.sublime_platform.base_url }}{%- raw -%}/v0/message-groups", + "response_split": "target: body.message_groups\ntype: array\nkeep_parent: false\ntransforms:\n - set:\n target: body.sublime.request_url\n value : '[[ .last_response.url.value ]]'", + "tags": [ + "forwarded" + ], + "pipeline": "sublime", + "data_stream.dataset": "sublime", + "request_interval": "1m" + } + } + } + } + } +} +{%- endraw -%} diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index af660358a..8cb975086 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -40,3 +40,35 @@ elasticfleet: helpLink: elastic-fleet.html sensitive: True advanced: True + optional_integrations: + sublime_platform: + enabled_nodes: + description: Determines if the Sublime Platform integration is enabled. + global: True + helpLink: elastic-fleet.html + advanced: True + forcedType: "[]string" + api_key: + description: API key for Sublime Platform. + global: False + helpLink: elastic-fleet.html + advanced: True + forcedType: string + base_url: + description: Base URL for Sublime Platform. + global: False + helpLink: elastic-fleet.html + advanced: True + forcedType: string + poll_interval: + description: Poll interval for alerts from Sublime Platform. + global: False + helpLink: elastic-fleet.html + advanced: True + forcedType: string + limit: + description: The maximum number of message groups to return from Sublime Platform. 
+ global: False + helpLink: elastic-fleet.html + advanced: True + forcedType: int From 9701d0ac206382124b5e8ffe854267b30f48aa18 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 13:47:20 +0000 Subject: [PATCH 314/417] Optional integration Fleet configuration --- salt/elasticfleet/config.sls | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls index d2e357c91..29eb6e972 100644 --- a/salt/elasticfleet/config.sls +++ b/salt/elasticfleet/config.sls @@ -6,6 +6,7 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% from 'vars/globals.map.jinja' import GLOBALS %} {% if sls.split('.')[0] in allowed_states %} +{% set node_data = salt['pillar.get']('node_data') %} # Add EA Group elasticfleetgroup: @@ -92,6 +93,34 @@ eaintegration: - user: 947 - group: 939 +{% for minion in node_data %} +{% set role = node_data[minion]["role"] %} +{% if role in [ "fleet","heavynode", "manager","managersearch","standalone" ] %} +{% set optional_integrations = salt['pillar.get']('elasticfleet:optional_integrations', {}) %} +{% set integration_keys = salt['pillar.get']('elasticfleet:optional_integrations', {}).keys() %} +fleet_server_integrations_{{ minion }}: + file.directory: + - name: /opt/so/conf/elastic-fleet/integrations/FleetServer_{{ minion }} + - user: 947 + - group: 939 + - makedirs: True +{% for integration in integration_keys %} +{% set enabled_nodes = optional_integrations[integration]["enabled_nodes"] %} +{% if minion in enabled_nodes %} +optional_integrations_dynamic_{{ minion }}: + file.managed: + - name: /opt/so/conf/elastic-fleet/integrations/FleetServer_{{ minion }}/{{ integration }}.json + - source: salt://elasticfleet/files/integrations-optional/{{ integration }}.json + - user: 947 + - group: 939 + - template: jinja + - defaults: + NAME: {{ minion }} +{% endif %} +{% endfor %} +{% endif %} +{% endfor %} + ea-integrations-load: file.absent: - name: 
/opt/so/state/eaintegrations.txt @@ -99,6 +128,9 @@ ea-integrations-load: - file: eaintegration - file: eadynamicintegration - file: eapackageupgrade + {% for minion in node_data %} + - file: optional_integrations_dynamic_{{ minion }} + {% endfor %} {% endif %} {% else %} From 44e45843bfc5c9af30f54ea01306e2ee5a2408d2 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 13:52:38 +0000 Subject: [PATCH 315/417] Change optional integration Fleet configuration --- salt/elasticfleet/config.sls | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls index 29eb6e972..78fb1d412 100644 --- a/salt/elasticfleet/config.sls +++ b/salt/elasticfleet/config.sls @@ -93,6 +93,13 @@ eaintegration: - user: 947 - group: 939 +eaoptionalintegrationsdir: + file.directory: + - name: /opt/so/conf/elastic-fleet/integrations-optional + - user: 947 + - group: 939 + - makedirs: True + {% for minion in node_data %} {% set role = node_data[minion]["role"] %} {% if role in [ "fleet","heavynode", "manager","managersearch","standalone" ] %} @@ -100,16 +107,16 @@ eaintegration: {% set integration_keys = salt['pillar.get']('elasticfleet:optional_integrations', {}).keys() %} fleet_server_integrations_{{ minion }}: file.directory: - - name: /opt/so/conf/elastic-fleet/integrations/FleetServer_{{ minion }} + - name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }} - user: 947 - group: 939 - makedirs: True {% for integration in integration_keys %} {% set enabled_nodes = optional_integrations[integration]["enabled_nodes"] %} {% if minion in enabled_nodes %} -optional_integrations_dynamic_{{ minion }}: +optional_integrations_dynamic_{{ minion }}_{{ integration }}: file.managed: - - name: /opt/so/conf/elastic-fleet/integrations/FleetServer_{{ minion }}/{{ integration }}.json + - name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}/{{ integration }}.json - source: 
salt://elasticfleet/files/integrations-optional/{{ integration }}.json - user: 947 - group: 939 @@ -120,17 +127,13 @@ optional_integrations_dynamic_{{ minion }}: {% endfor %} {% endif %} {% endfor %} - ea-integrations-load: file.absent: - name: /opt/so/state/eaintegrations.txt - onchanges: - file: eaintegration - file: eadynamicintegration - - file: eapackageupgrade - {% for minion in node_data %} - - file: optional_integrations_dynamic_{{ minion }} - {% endfor %} + - file: /opt/so/conf/elastic-fleet/integrations-optional/* {% endif %} {% else %} From a0926b7b872ea091475e71b6476c3732f99256cd Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 13:59:24 +0000 Subject: [PATCH 316/417] Load optional integrations --- .../so-elastic-fleet-integration-policy-load | 24 +++++++++++++++++-- 1 file changed, 22 insertions(+), 2 deletions(-) diff --git a/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load b/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load index 44e7ccf2b..518d29d26 100644 --- a/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load +++ b/salt/elasticfleet/tools/sbin/so-elastic-fleet-integration-policy-load @@ -64,8 +64,28 @@ if [ ! -f /opt/so/state/eaintegrations.txt ]; then if [[ "$RETURN_CODE" != "1" ]]; then touch /opt/so/state/eaintegrations.txt fi + + # Fleet Server - Optional integrations + for INTEGRATION in /opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json + do + if ! 
[ "$INTEGRATION" == "/opt/so/conf/elastic-fleet/integrations-optional/FleetServer*/*.json" ]; then + FLEET_POLICY=`echo "$INTEGRATION"| cut -d'/' -f7` + printf "\n\nFleet Server Policy - Loading $INTEGRATION\n" + elastic_fleet_integration_check "$FLEET_POLICY" "$INTEGRATION" + if [ -n "$INTEGRATION_ID" ]; then + printf "\n\nIntegration $NAME exists - Updating integration\n" + elastic_fleet_integration_update "$INTEGRATION_ID" "@$INTEGRATION" + else + printf "\n\nIntegration does not exist - Creating integration\n" + if [ "$NAME" != "elasticsearch-logs" ]; then + elastic_fleet_integration_create "@$INTEGRATION" + fi + fi + fi + done + if [[ "$RETURN_CODE" != "1" ]]; then + touch /opt/so/state/eaintegrations.txt + fi else exit $RETURN_CODE fi - - From bca1194a468362f36749cbaeafcbb2f15c98b5b4 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 14:01:55 +0000 Subject: [PATCH 317/417] Sublime SOC Action --- salt/soc/defaults.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index 6d8ed5bfd..ceca9ef31 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -59,6 +59,12 @@ soc: target: _blank links: - 'https://www.virustotal.com/gui/search/{value}' + - name: Sublime Platform Email Review + description: Review email in Sublime Platform + icon: fa-external-link-alt + target: _blank + links: + - 'https://{:sublime.url}/messages/{:sublime.message_group_id}' eventFields: default: - soc_timestamp From 338146feddf21e1cb0e7faea232d37d318bab2d7 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 1 Nov 2023 10:19:56 -0400 Subject: [PATCH 318/417] fix repo update during soup for airgap --- salt/manager/tools/sbin/soup | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index f30c3f15d..263fab7d0 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -579,7 +579,7 @@ update_airgap_rules() { rsync -av 
$UPDATE_DIR/agrules/* /nsm/repo/rules/ } -update_centos_repo() { +update_airgap_repo() { # Update the files in the repo echo "Syncing new updates to /nsm/repo" rsync -av $AGREPO/* /nsm/repo/ @@ -821,6 +821,7 @@ main() { set -e if [[ $is_airgap -eq 0 ]]; then + update_airgap_repo yum clean all check_os_updates elif [[ $OS == 'oracle' ]]; then @@ -895,11 +896,6 @@ main() { update_airgap_rules fi - # Only update the repo if its airgap - if [[ $is_airgap -eq 0 && $UPGRADESALT -ne 1 ]]; then - update_centos_repo - fi - # since we don't run the backup.config_backup state on import we wont snapshot previous version states and pillars if [[ ! "$MINIONID" =~ "_import" ]]; then echo "" From 2b3e405b2d30361874f2a76f3ff4a4d8cc0b5b31 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 1 Nov 2023 10:41:40 -0400 Subject: [PATCH 319/417] Delete pillar/thresholding/pillar.usage --- pillar/thresholding/pillar.usage | 20 -------------------- 1 file changed, 20 deletions(-) delete mode 100644 pillar/thresholding/pillar.usage diff --git a/pillar/thresholding/pillar.usage b/pillar/thresholding/pillar.usage deleted file mode 100644 index 1626433b1..000000000 --- a/pillar/thresholding/pillar.usage +++ /dev/null @@ -1,20 +0,0 @@ -thresholding: - sids: - : - - threshold: - gen_id: - type: - track: - count: - seconds: - - rate_filter: - gen_id: - track: - count: - seconds: - new_action: - timeout: - - suppress: - gen_id: - track: - ip: From f62e02a47704a3a62b7deca1fc93321349067bd4 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 1 Nov 2023 10:42:29 -0400 Subject: [PATCH 320/417] Delete pillar/thresholding/pillar.example --- pillar/thresholding/pillar.example | 44 ------------------------------ 1 file changed, 44 deletions(-) delete mode 100644 pillar/thresholding/pillar.example diff --git a/pillar/thresholding/pillar.example b/pillar/thresholding/pillar.example deleted file mode 100644 index 705cb606c..000000000 --- a/pillar/thresholding/pillar.example +++ /dev/null @@ -1,44 +0,0 @@ 
-thresholding: - sids: - 8675309: - - threshold: - gen_id: 1 - type: threshold - track: by_src - count: 10 - seconds: 10 - - threshold: - gen_id: 1 - type: limit - track: by_dst - count: 100 - seconds: 30 - - rate_filter: - gen_id: 1 - track: by_rule - count: 50 - seconds: 30 - new_action: alert - timeout: 30 - - suppress: - gen_id: 1 - track: by_either - ip: 10.10.3.7 - 11223344: - - threshold: - gen_id: 1 - type: limit - track: by_dst - count: 10 - seconds: 10 - - rate_filter: - gen_id: 1 - track: by_src - count: 50 - seconds: 20 - new_action: pass - timeout: 60 - - suppress: - gen_id: 1 - track: by_src - ip: 10.10.3.0/24 From 655c88cd09874abe92106b3df38a8ae0225b4a55 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 16:47:51 +0000 Subject: [PATCH 321/417] Make sure enabled_nodes is populated --- salt/elasticfleet/config.sls | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls index 78fb1d412..434b6db2d 100644 --- a/salt/elasticfleet/config.sls +++ b/salt/elasticfleet/config.sls @@ -112,6 +112,7 @@ fleet_server_integrations_{{ minion }}: - group: 939 - makedirs: True {% for integration in integration_keys %} +{% if 'enabled_nodes' in optional_integrations[integration]%} {% set enabled_nodes = optional_integrations[integration]["enabled_nodes"] %} {% if minion in enabled_nodes %} optional_integrations_dynamic_{{ minion }}_{{ integration }}: @@ -124,6 +125,7 @@ optional_integrations_dynamic_{{ minion }}_{{ integration }}: - defaults: NAME: {{ minion }} {% endif %} +{% endif %} {% endfor %} {% endif %} {% endfor %} From c32935e2e63fbf6e01bd7417e8afdf9453163993 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 17:02:43 +0000 Subject: [PATCH 322/417] Remove optional integration from configuration if not enabled --- salt/elasticfleet/config.sls | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls index 434b6db2d..02672d58f 100644 --- 
a/salt/elasticfleet/config.sls +++ b/salt/elasticfleet/config.sls @@ -124,6 +124,10 @@ optional_integrations_dynamic_{{ minion }}_{{ integration }}: - template: jinja - defaults: NAME: {{ minion }} +{% else %} +optional_integrations_dynamic_{{ minion }}_{{ integration }}_delete: + file.absent: + - name: /opt/so/conf/elastic-fleet/integrations-optional/FleetServer_{{ minion }}/{{ integration }}.json {% endif %} {% endif %} {% endfor %} From b3b67acf07a9d7099db56a3308b5c70942d4f9c7 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 1 Nov 2023 15:11:54 -0400 Subject: [PATCH 323/417] Add memory restrictions --- .../assigned_hostgroups.local.map.yaml | 1 - setup/so-functions | 27 ++++++++++++++----- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/files/firewall/assigned_hostgroups.local.map.yaml b/files/firewall/assigned_hostgroups.local.map.yaml index 07f389af0..be34cb0be 100644 --- a/files/firewall/assigned_hostgroups.local.map.yaml +++ b/files/firewall/assigned_hostgroups.local.map.yaml @@ -12,7 +12,6 @@ role: eval: fleet: heavynode: - helixsensor: idh: import: manager: diff --git a/setup/so-functions b/setup/so-functions index 56c4b29c1..a6d8c585a 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -562,7 +562,7 @@ check_requirements() { req_cores=4 req_nics=2 elif [[ $is_standalone ]]; then - req_mem=24 + req_mem=16 req_cores=4 req_nics=2 elif [[ $is_manager ]]; then @@ -586,7 +586,7 @@ check_requirements() { req_cores=4 req_nics=1 elif [[ $is_heavynode ]]; then - req_mem=24 + req_mem=16 req_cores=4 req_nics=2 elif [[ $is_idh ]]; then @@ -651,6 +651,17 @@ check_requirements() { if [[ $total_mem_hr -lt $req_mem ]]; then whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB" + if [[ $is_standalone || $is_heavynode ]]; then + echo "This install type will fail with less than $req_mem GB of memory" + exit 0 + fi + fi + if [[ $is_standalone || $is_heavynode ]]; then + if [[ $total_mem_hr -gt 15 && $total_mem_hr -lt 24 ]]; then 
+ low_mem=true + else + low_mem=false + fi fi } @@ -956,9 +967,7 @@ docker_seed_registry() { if ! [ -f /nsm/docker-registry/docker/registry.tar ]; then if [ "$install_type" == 'IMPORT' ]; then - container_list 'so-import' - elif [ "$install_type" == 'HELIXSENSOR' ]; then - container_list 'so-helix' + container_list 'so-import' else container_list fi @@ -1217,7 +1226,7 @@ ls_heapsize() { fi case "$install_type" in - 'MANAGERSEARCH' | 'HEAVYNODE' | 'HELIXSENSOR' | 'STANDALONE') + 'MANAGERSEARCH' | 'HEAVYNODE' | 'STANDALONE') LS_HEAP_SIZE='1000m' ;; 'EVAL') @@ -1699,7 +1708,11 @@ drop_install_options() { echo "INTERFACE=$INTERFACE" >> /opt/so/install.txt NODETYPE=${install_type^^} echo "NODETYPE=$NODETYPE" >> /opt/so/install.txt - echo "CORECOUNT=$lb_procs" >> /opt/so/install.txt + if [[ $low_mem == "true" ]]; then + echo "CORECOUNT=1" >> /opt/so/install.txt + else + echo "CORECOUNT=$lb_procs" >> /opt/so/install.txt + fi echo "LSHOSTNAME=$HOSTNAME" >> /opt/so/install.txt echo "LSHEAP=$LS_HEAP_SIZE" >> /opt/so/install.txt echo "CPUCORES=$num_cpu_cores" >> /opt/so/install.txt From cc93976db99d82c9303386df1ebc1effc6bda2ee Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 1 Nov 2023 15:17:23 -0400 Subject: [PATCH 324/417] Add memory restrictions --- setup/so-functions | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-functions b/setup/so-functions index a6d8c585a..6ad0947d1 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -652,7 +652,7 @@ check_requirements() { if [[ $total_mem_hr -lt $req_mem ]]; then whiptail_requirements_error "memory" "${total_mem_hr} GB" "${req_mem} GB" if [[ $is_standalone || $is_heavynode ]]; then - echo "This install type will fail with less than $req_mem GB of memory" + echo "This install type will fail with less than $req_mem GB of memory. Exiting setup." 
exit 0 fi fi From e6a0838e4cd2ac0bc8c42e686363a68d7f10d6f0 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Wed, 1 Nov 2023 15:26:24 -0400 Subject: [PATCH 325/417] Add memory restrictions --- setup/so-setup | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 543ac0156..db5df492d 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -676,7 +676,11 @@ if ! [[ -f $install_opt_file ]]; then export MAINIP=$MAINIP export PATCHSCHEDULENAME=$PATCHSCHEDULENAME export INTERFACE=$INTERFACE - export CORECOUNT=$lb_procs + if [[ $low_mem == "true" ]]; then + export CORECOUNT=1 + else + export CORECOUNT=$lb_procs + fi export LSHOSTNAME=$HOSTNAME export LSHEAP=$LS_HEAP_SIZE export CPUCORES=$num_cpu_cores From f33079f1e35792a31396f5064aa02dcbf690c2b7 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 1 Nov 2023 20:09:56 +0000 Subject: [PATCH 326/417] Make settings global --- salt/elasticfleet/soc_elasticfleet.yaml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index 8cb975086..8685a96e5 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -43,32 +43,33 @@ elasticfleet: optional_integrations: sublime_platform: enabled_nodes: - description: Determines if the Sublime Platform integration is enabled. + description: Fleet nodes with the Sublime Platform integration enabled. Enter one per line. global: True helpLink: elastic-fleet.html advanced: True forcedType: "[]string" api_key: description: API key for Sublime Platform. - global: False + global: True helpLink: elastic-fleet.html advanced: True forcedType: string + sensitive: True base_url: description: Base URL for Sublime Platform. - global: False + global: True helpLink: elastic-fleet.html advanced: True forcedType: string poll_interval: description: Poll interval for alerts from Sublime Platform. 
- global: False + global: True helpLink: elastic-fleet.html advanced: True forcedType: string limit: description: The maximum number of message groups to return from Sublime Platform. - global: False + global: True helpLink: elastic-fleet.html advanced: True forcedType: int From 344dd7d61fac0c4802eed7baafa6f6c69ebb1381 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 1 Nov 2023 16:50:20 -0400 Subject: [PATCH 327/417] Add Elastic Fleet reset script --- .../tools/sbin_jinja/so-elastic-fleet-reset | 65 +++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100644 salt/manager/tools/sbin_jinja/so-elastic-fleet-reset diff --git a/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset b/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset new file mode 100644 index 000000000..197ea0912 --- /dev/null +++ b/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset @@ -0,0 +1,65 @@ +#!/bin/bash +# +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. +{% from 'vars/globals.map.jinja' import GLOBALS %} + + +. /usr/sbin/so-common + +require_manager + + +# Inform user we are about to remove Elastic Fleet data +echo +echo "This script will remove the current Elastic Fleet install & all of its data and rerun Elastic Fleet setup." +echo +echo "If you would like to proceed, please type "AGREE" and hit ENTER." 
+echo +# Read user input +read INPUT +if [ "$INPUT" != "AGREE" ] ; then exit 0; fi + + +printf "\nUninstalling all Elastic Agents on all Grid Nodes...\n\n" +salt \* cmd.run "elastic-agent uninstall -f" queue=True + +printf "\nStopping Fleet Container...\n" +so-elastic-fleet-stop --force + +printf "\nDeleting Fleet Data from Pillars...\n" +sed -i -z "s/elasticfleet:.*grid_enrollment_heavy.*'//" /opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls +sed -i "/fleet_grid_enrollment_token_general.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls +sed -i "/fleet_grid_enrollment_token_heavy.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls + +printf "\n\nDeleting Elastic Fleet data...\n\n" + +ALIASES=".fleet-servers .fleet-policies-leader .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest" +for ALIAS in ${ALIASES} +do + # Get all concrete indices from alias + INDXS=$(curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" | jq -r '.aliases[].indices[]') + + # Delete all resolved indices + for INDX in ${INDXS} + do + printf "\nDeleting $INDX \n" + curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE + done +done + +printf "\n\nRestarting Kibana..\n" +so-kibana-restart --force + +wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "curl -K /opt/so/conf/elasticsearch/curl.config" + +printf "\nStarting Elastic Fleet Setup...\n" +so-elastic-fleet-setup + + +printf "\nRe-installing Elastic Agent on all Grid Nodes...\n\n" +salt \* state.apply elasticfleet.install_agent_grid queue=True + +printf "\nElastic Fleet Reset complete....\n" \ No newline at end of file From c230cf4eb7c197b554bd43dfbb8af11dd8144300 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Wed, 1 Nov 2023 17:00:32 -0400 Subject: [PATCH 328/417] Formatting --- 
salt/manager/tools/sbin_jinja/so-elastic-fleet-reset | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset b/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset index 197ea0912..35f867884 100644 --- a/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset +++ b/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset @@ -16,7 +16,7 @@ require_manager echo echo "This script will remove the current Elastic Fleet install & all of its data and rerun Elastic Fleet setup." echo -echo "If you would like to proceed, please type "AGREE" and hit ENTER." +echo "If you would like to proceed, please type AGREE and hit ENTER." echo # Read user input read INPUT @@ -50,7 +50,7 @@ do done done -printf "\n\nRestarting Kibana..\n" +printf "\n\nRestarting Kibana...\n" so-kibana-restart --force wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "curl -K /opt/so/conf/elasticsearch/curl.config" From c8d89971198b7afec72d60a56febc855781b1880 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 2 Nov 2023 09:21:57 -0400 Subject: [PATCH 329/417] adjust log filter to include all hosts --- setup/so-verify | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/setup/so-verify b/setup/so-verify index e9a8a375c..98bda96be 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -25,7 +25,8 @@ log_has_errors() { # Ignore salt mast cached public key and minion failed to auth because this is a test # to see if the salt key had already been accepted. - # Ignore failed to connect to ::1 since we have most curls wrapped in a retry. + # Ignore failed to connect to since we have most curls wrapped in a retry and there are + # multiple mirrors available. # Ignore perl-Error- since that is the name of a Perl package SO installs. 
@@ -39,7 +40,7 @@ log_has_errors() { grep -E "FAILED|Failed|failed|ERROR|Result: False|Error is not recoverable" "$setup_log" | \ grep -vE "The Salt Master has cached the public key for this node" | \ grep -vE "Minion failed to authenticate with the master" | \ - grep -vE "Failed to connect to ::1" | \ + grep -vE "Failed to connect to " | \ grep -vE "Failed to set locale" | \ grep -vE "perl-Error-" | \ grep -vE "Failed:\s*?[0-9]+" | \ From 65735fc4d3ff484eb02b0e43912373823267bb4d Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 2 Nov 2023 09:54:01 -0400 Subject: [PATCH 330/417] Add eval and import roles --- salt/elasticfleet/config.sls | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls index 02672d58f..3ff74efcd 100644 --- a/salt/elasticfleet/config.sls +++ b/salt/elasticfleet/config.sls @@ -102,7 +102,7 @@ eaoptionalintegrationsdir: {% for minion in node_data %} {% set role = node_data[minion]["role"] %} -{% if role in [ "fleet","heavynode", "manager","managersearch","standalone" ] %} +{% if role in [ "eval","fleet","heavynode","import","manager","managersearch","standalone" ] %} {% set optional_integrations = salt['pillar.get']('elasticfleet:optional_integrations', {}) %} {% set integration_keys = salt['pillar.get']('elasticfleet:optional_integrations', {}).keys() %} fleet_server_integrations_{{ minion }}: From 1d2518310dfdc59cc05a199f619fe6063f93623a Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 2 Nov 2023 09:59:45 -0400 Subject: [PATCH 331/417] more log bypass --- salt/common/tools/sbin/so-log-check | 1 + setup/so-verify | 3 +++ 2 files changed, 4 insertions(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 10f7e8c89..101001be0 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -160,6 +160,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|soc.field." 
# known ingest type collisions issue with earlier versions of SO EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to determine destination index stats" # Elastic transform temporary error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" diff --git a/setup/so-verify b/setup/so-verify index 98bda96be..e4d90b937 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -36,6 +36,8 @@ log_has_errors() { # This is ignored for Ubuntu # Failed to restart snapd.mounts-pre.target: Operation refused, unit snapd.mounts-pre.target # may be requested by dependency only (it is configured to refuse manual start/stop). + + # Exit code 100 failure is likely apt-get running in the background, we wait for it to unlock. grep -E "FAILED|Failed|failed|ERROR|Result: False|Error is not recoverable" "$setup_log" | \ grep -vE "The Salt Master has cached the public key for this node" | \ @@ -55,6 +57,7 @@ log_has_errors() { grep -vE "Login Failed Details" | \ grep -vE "response from daemon: unauthorized" | \ grep -vE "Reading first line of patchfile" | \ + grep -vE "Command failed with exit code 100; will retry" | \ grep -vE "Running scope as unit" &> "$error_log" if [[ $? 
-eq 0 ]]; then From 5388b92865bfc4c2903322ba5cadfcb54398932c Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 2 Nov 2023 10:20:32 -0400 Subject: [PATCH 332/417] Refactor & cleanup --- salt/common/tools/sbin/so-common | 4 ++ .../tools/sbin_jinja/so-elastic-fleet-setup | 11 +++++ .../tools/sbin_jinja/so-elastic-fleet-reset | 44 +++++++++++++------ setup/so-setup | 6 ++- 4 files changed, 49 insertions(+), 16 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index bfa61f1b7..8089db28b 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -551,6 +551,10 @@ set_version() { fi } +status () { + printf "\n=========================================================================\n$(date) | $1\n=========================================================================\n" +} + systemctl_func() { local action=$1 local echo_action=$1 diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index 83a155ae6..d908d1df7 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -8,8 +8,19 @@ INTCA=/etc/pki/tls/certs/intca.crt +. /usr/sbin/so-common . /usr/sbin/so-elastic-fleet-common +# Check to make sure that Kibana API is up & ready +RETURN_CODE=0 +wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config" +RETURN_CODE=$? + +if [[ "$RETURN_CODE" != "0" ]]; then + printf "Kibana API not accessible, exiting Elastic Fleet setup..." 
+ exit 1 +fi + printf "\n### Create ES Token ###\n" ESTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/service_tokens" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq -r .value) diff --git a/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset b/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset index 35f867884..5a7be9e60 100644 --- a/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset +++ b/salt/manager/tools/sbin_jinja/so-elastic-fleet-reset @@ -11,30 +11,39 @@ require_manager - # Inform user we are about to remove Elastic Fleet data echo echo "This script will remove the current Elastic Fleet install & all of its data and rerun Elastic Fleet setup." echo -echo "If you would like to proceed, please type AGREE and hit ENTER." +echo "If you would like to proceed, type AGREE and hit ENTER." echo # Read user input read INPUT -if [ "$INPUT" != "AGREE" ] ; then exit 0; fi +if [ "${INPUT^^}" != 'AGREE' ]; then exit 0; fi -printf "\nUninstalling all Elastic Agents on all Grid Nodes...\n\n" +status "Uninstalling all Elastic Agents on all Grid Nodes..." salt \* cmd.run "elastic-agent uninstall -f" queue=True -printf "\nStopping Fleet Container...\n" +status "Stopping Fleet Container..." so-elastic-fleet-stop --force -printf "\nDeleting Fleet Data from Pillars...\n" +status "Deleting Fleet Data from Pillars..." sed -i -z "s/elasticfleet:.*grid_enrollment_heavy.*'//" /opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls sed -i "/fleet_grid_enrollment_token_general.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls sed -i "/fleet_grid_enrollment_token_heavy.*/d" /opt/so/saltstack/local/pillar/global/soc_global.sls -printf "\n\nDeleting Elastic Fleet data...\n\n" +status "Deleting Elastic Fleet data..." 
+ +# Check to make sure that Elasticsearch is up & ready +RETURN_CODE=0 +wait_for_web_response "https://localhost:9200/_cat/indices/.kibana*" "green open" 300 "curl -K /opt/so/conf/elasticsearch/curl.config" +RETURN_CODE=$? + +if [[ "$RETURN_CODE" != "0" ]]; then + status "Elasticsearch not accessible, exiting script..." + exit 1 +fi ALIASES=".fleet-servers .fleet-policies-leader .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest" for ALIAS in ${ALIASES} @@ -45,21 +54,28 @@ do # Delete all resolved indices for INDX in ${INDXS} do - printf "\nDeleting $INDX \n" + status "Deleting $INDX" curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE done done -printf "\n\nRestarting Kibana...\n" +status "Restarting Kibana..." so-kibana-restart --force -wait_for_web_response "http://localhost:5601/app/kibana" "Elastic" 300 "curl -K /opt/so/conf/elasticsearch/curl.config" +status "Checking to make sure that Kibana API is up & ready..." +RETURN_CODE=0 +wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config" +RETURN_CODE=$? -printf "\nStarting Elastic Fleet Setup...\n" +if [[ "$RETURN_CODE" != "0" ]]; then + status "Kibana API not accessible, exiting script..." + exit 1 +fi + +status "Starting Elastic Fleet Setup..." so-elastic-fleet-setup - -printf "\nRe-installing Elastic Agent on all Grid Nodes...\n\n" +status "Re-installing Elastic Agent on all Grid Nodes..." salt \* state.apply elasticfleet.install_agent_grid queue=True -printf "\nElastic Fleet Reset complete....\n" \ No newline at end of file +status "Elastic Fleet Reset complete...." \ No newline at end of file diff --git a/setup/so-setup b/setup/so-setup index 543ac0156..2ea2809d5 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -768,8 +768,10 @@ if ! 
[[ -f $install_opt_file ]]; then info "Restarting SOC to pick up initial user" logCmd "so-soc-restart" title "Setting up Elastic Fleet" - logCmd "salt-call state.apply elasticfleet.config" - logCmd "so-elastic-fleet-setup" + logCmd "salt-call state.apply elasticfleet.config" + if ! logCmd so-setup-elastic-fleet; then + fail_setup + fi if [[ ! $is_import ]]; then title "Setting up Playbook" logCmd "so-playbook-reset" From 6c4dc7cc090d63faf93eeb952a1dcbdaf81c9c38 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Thu, 2 Nov 2023 10:23:03 -0400 Subject: [PATCH 333/417] fix UPGRADECOMMAND used for distrib salt upgrade. remove unneeded vars --- salt/salt/map.jinja | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/salt/salt/map.jinja b/salt/salt/map.jinja index 131ff46ca..5f687ef3f 100644 --- a/salt/salt/map.jinja +++ b/salt/salt/map.jinja @@ -5,22 +5,18 @@ {% set SPLITCHAR = '+' %} {% set SALTNOTHELD = salt['cmd.run']('apt-mark showhold | grep -q salt ; echo $?', python_shell=True) %} {% set SALTPACKAGES = ['salt-common', 'salt-master', 'salt-minion'] %} - {% set SALT_STATE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/states' %} - {% set SALT_MODULE_CODE_PATH = '/usr/lib/python3/dist-packages/salt/modules' %} {% set SYSTEMD_UNIT_FILE = '/lib/systemd/system/salt-minion.service' %} {% else %} {% set SPLITCHAR = '-' %} {% set SALTNOTHELD = salt['cmd.run']('yum versionlock list | grep -q salt ; echo $?', python_shell=True) %} {% set SALTPACKAGES = ['salt', 'salt-master', 'salt-minion'] %} - {% set SALT_STATE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/states' %} - {% set SALT_MODULE_CODE_PATH = '/usr/lib/python3.6/site-packages/salt/modules' %} {% set SYSTEMD_UNIT_FILE = '/usr/lib/systemd/system/salt-minion.service' %} {% endif %} {% set INSTALLEDSALTVERSION = grains.saltversion %} {% if grains.saltversion|string != SALTVERSION|string %} - {% if grains.os|lower in ['Rocky', 'redhat', 'CentOS Stream'] %} + {% if grains.os_family|lower == 
'redhat' %} {% set UPGRADECOMMAND = 'yum clean all ; /usr/sbin/bootstrap-salt.sh -s 120 -r -F -x python3 stable ' ~ SALTVERSION %} {% elif grains.os_family|lower == 'debian' %} {% set UPGRADECOMMAND = '/usr/sbin/bootstrap-salt.sh -s 120 -F -x python3 stable ' ~ SALTVERSION %} From 2c0e287f8c320f3d0f1d800a0916e90d59a93ca7 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Thu, 2 Nov 2023 10:34:24 -0400 Subject: [PATCH 334/417] Fix name --- setup/so-setup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 2ea2809d5..26955b893 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -769,7 +769,7 @@ if ! [[ -f $install_opt_file ]]; then logCmd "so-soc-restart" title "Setting up Elastic Fleet" logCmd "salt-call state.apply elasticfleet.config" - if ! logCmd so-setup-elastic-fleet; then + if ! logCmd so-elastic-fleet-setup; then fail_setup fi if [[ ! $is_import ]]; then From e18e0fd69a982d317a656a3d8e15d2bcd0160c8e Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 2 Nov 2023 10:39:14 -0400 Subject: [PATCH 335/417] more log bypass --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 101001be0..0fd98a12e 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -161,6 +161,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to determine destination index stats" # Elastic transform temporary error + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving docker" # Telegraf unable to reach Docker engine, rare EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" 
EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From 32701b594187484a1944d6d4fc4072e822d5b468 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 2 Nov 2023 12:50:12 -0400 Subject: [PATCH 336/417] more log bypass --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 0fd98a12e..dc2b1d741 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -114,6 +114,7 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|failed to poll" # server not yet ready (sensoroni waiting on soc) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|minions returned with non" # server not yet ready (salt waiting on minions) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # server not yet ready (influxdb not yet setup) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|search_phase_execution_exception" # server not yet ready (elastalert running searches before ES is ready) fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then From 51e7861757e04ecb2a66bfb3790ebcebdbe5a11d Mon Sep 17 00:00:00 2001 From: weslambert Date: Thu, 2 Nov 2023 16:41:34 -0400 Subject: [PATCH 337/417] Don't source so-elastic-fleet-common if not there --- .../tools/sbin_jinja/so-elasticsearch-templates-load | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index aac6279fc..857da5434 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -9,7 +9,9 @@ . /usr/sbin/so-common {% if GLOBALS.role != 'so-heavynode' %} -. /usr/sbin/so-elastic-fleet-common +if [ -f /usr/sbin/so-elastic-fleet-common ]; then + . 
/usr/sbin/so-elastic-fleet-common +fi {% endif %} default_conf_dir=/opt/so/conf From 96fdfb382977de14430f18dcee522f441238b032 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Thu, 2 Nov 2023 16:46:41 -0400 Subject: [PATCH 338/417] ignore connectivity problems to docker containers during startup --- salt/common/tools/sbin/so-log-check | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index dc2b1d741..170ef9506 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -115,6 +115,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|minions returned with non" # server not yet ready (salt waiting on minions) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|so_long_term" # server not yet ready (influxdb not yet setup) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|search_phase_execution_exception" # server not yet ready (elastalert running searches before ES is ready) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving docker" # Telegraf unable to reach Docker engine, rare + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving container" # Telegraf unable to reach Docker engine, rare fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then @@ -162,7 +164,6 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error parsing signature" # Malformed Suricata rule, from upstream provider EXCLUDED_ERRORS="$EXCLUDED_ERRORS|sticky buffer has no matches" # Non-critical Suricata error EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Unable to determine destination index stats" # Elastic transform temporary error - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving docker" # Telegraf unable to reach Docker engine, rare EXCLUDED_ERRORS="$EXCLUDED_ERRORS|iteration" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|communication packets" EXCLUDED_ERRORS="$EXCLUDED_ERRORS|use of closed" From 3875970dc52de40d2e112082798f4285776383fd Mon Sep 17 00:00:00 
2001 From: Wes Date: Thu, 2 Nov 2023 21:09:37 +0000 Subject: [PATCH 339/417] Add checkpoint and vsphere packages --- salt/elasticfleet/defaults.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index ba779f5a0..61ae5999f 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -35,6 +35,7 @@ elasticfleet: - azure - barracuda - carbonblack_edr + - checkpoint - cisco_asa - cisco_duo - cisco_meraki @@ -86,6 +87,7 @@ elasticfleet: - ti_otx - ti_recordedfuture - udp + - vsphere - windows - zscaler_zia - zscaler_zpa From 5bfef3f527bb530793ded8120fc91bdb728b03e3 Mon Sep 17 00:00:00 2001 From: Wes Date: Thu, 2 Nov 2023 21:10:01 +0000 Subject: [PATCH 340/417] Add checkpoint and vsphere templates --- salt/elasticsearch/defaults.yaml | 220 +++++++++++++++++++++++++++++++ 1 file changed, 220 insertions(+) diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index cd8ff9397..02c2529a6 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -1965,6 +1965,50 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-checkpoint_x_firewall: + index_sorting: False + index_template: + index_patterns: + - "logs-checkpoint.firewall-*" + template: + settings: + index: + lifecycle: + name: so-logs-checkpoint.firewall-logs + number_of_replicas: 0 + composed_of: + - "logs-checkpoint.firewall@package" + - "logs-checkpoint.firewall@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-cisco_asa_x_log: 
index_sorting: false index_template: @@ -7798,6 +7842,50 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-logs-vsphere_x_log: + index_sorting: False + index_template: + index_patterns: + - "logs-vsphere.log-*" + template: + settings: + index: + lifecycle: + name: so-logs-vsphere.log-logs + number_of_replicas: 0 + composed_of: + - "logs-vsphere.log@package" + - "logs-vsphere.log@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logs-windows_x_forwarded: index_sorting: false index_template: @@ -8414,6 +8502,138 @@ elasticsearch: set_priority: priority: 50 min_age: 30d + so-metrics-vsphere_x_datastore: + index_sorting: False + index_template: + index_patterns: + - "metrics-vsphere.datastore-*" + template: + settings: + index: + lifecycle: + name: so-metrics-vsphere.datastore-logs + number_of_replicas: 0 + composed_of: + - "metrics-vsphere.datastore@package" + - "metrics-vsphere.datastore@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-metrics-vsphere_x_host: + index_sorting: False + index_template: + index_patterns: + - "metrics-vsphere.host-*" + template: + settings: + index: + lifecycle: + name: 
so-metrics-vsphere.host-logs + number_of_replicas: 0 + composed_of: + - "metrics-vsphere.host@package" + - "metrics-vsphere.host@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d + so-metrics-vsphere_x_virtualmachine: + index_sorting: False + index_template: + index_patterns: + - "metrics-vsphere.virtualmachine-*" + template: + settings: + index: + lifecycle: + name: so-metrics-vsphere.virtualmachine-logs + number_of_replicas: 0 + composed_of: + - "metrics-vsphere.virtualmachine@package" + - "metrics-vsphere.virtualmachine@custom" + - "so-fleet_globals-1" + - "so-fleet_agent_id_verification-1" + priority: 501 + data_stream: + hidden: false + allow_custom_routing: false + policy: + phases: + cold: + actions: + set_priority: + priority: 0 + min_age: 30d + delete: + actions: + delete: {} + min_age: 365d + hot: + actions: + rollover: + max_age: 30d + max_primary_shard_size: 50gb + set_priority: + priority: 100 + min_age: 0ms + warm: + actions: + set_priority: + priority: 50 + min_age: 30d so-logstash: index_sorting: false index_template: From 8c7767b381462ae911abd7844e9261475c4b3bec Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 3 Nov 2023 08:41:33 -0400 Subject: [PATCH 341/417] Dont overwrite metadata --- .../pipelines/config/so/0012_input_elastic_agent.conf.jinja | 3 +++ .../pipelines/config/so/0013_input_lumberjack_fleet.conf | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/salt/logstash/pipelines/config/so/0012_input_elastic_agent.conf.jinja b/salt/logstash/pipelines/config/so/0012_input_elastic_agent.conf.jinja index 
035436a96..6ba29f8e5 100644 --- a/salt/logstash/pipelines/config/so/0012_input_elastic_agent.conf.jinja +++ b/salt/logstash/pipelines/config/so/0012_input_elastic_agent.conf.jinja @@ -11,7 +11,10 @@ input { } } filter { +if ![metadata] { mutate { rename => {"@metadata" => "metadata"} } } +} + diff --git a/salt/logstash/pipelines/config/so/0013_input_lumberjack_fleet.conf b/salt/logstash/pipelines/config/so/0013_input_lumberjack_fleet.conf index 0377a81c4..fd9a87a22 100644 --- a/salt/logstash/pipelines/config/so/0013_input_lumberjack_fleet.conf +++ b/salt/logstash/pipelines/config/so/0013_input_lumberjack_fleet.conf @@ -13,10 +13,11 @@ input { filter { - if "fleet-lumberjack-input" in [tags] { +if ![metadata] { mutate { rename => {"@metadata" => "metadata"} } } } + From 1a3d4a2051bcd0f1379ccbb05145f6a4d519f7d2 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Fri, 3 Nov 2023 09:14:26 -0400 Subject: [PATCH 342/417] ignore malformed open canary log lines --- salt/common/tools/sbin/so-log-check | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index 170ef9506..d3aff6c14 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -140,6 +140,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. 
app_layer.error.imap.parser | Total | 0 + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Canary running" # false positive (Open Canary logging out blank IP addresses) fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then From a8b0e41dbe70dfae77b2cfb116dc48df276cb819 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 3 Nov 2023 11:04:52 -0400 Subject: [PATCH 343/417] exit 0 --- salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup index d908d1df7..9c06cb7c2 100755 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup @@ -131,3 +131,4 @@ salt-call state.apply elasticfleet queue=True # Generate installers & install Elastic Agent on the node so-elastic-agent-gen-installers salt-call state.apply elasticfleet.install_agent_grid queue=True +exit 0 \ No newline at end of file From 3d8663db66e0f832fc022fb5e687107c72e14787 Mon Sep 17 00:00:00 2001 From: Mike Reeves Date: Fri, 3 Nov 2023 11:29:45 -0400 Subject: [PATCH 344/417] Update soc_elasticsearch.yaml --- salt/elasticsearch/soc_elasticsearch.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index a5170b776..ce795fe5a 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -463,7 +463,7 @@ elasticsearch: so-syslog: *indexSettings so-zeek: *indexSettings so_roles: - so-manger: &soroleSettings + so-manager: &soroleSettings config: node: roles: From 9d2b84818f2b7bb4bd20b601d52aead6653e072b Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Fri, 3 Nov 2023 15:00:13 -0400 Subject: [PATCH 345/417] apply es and soc states to manager if new search or hn are added --- salt/manager/tools/sbin/so-minion | 9 ++++++++- 1 file changed, 8 insertions(+), 1 
deletion(-) diff --git a/salt/manager/tools/sbin/so-minion b/salt/manager/tools/sbin/so-minion index eca96da5c..1baf88cad 100755 --- a/salt/manager/tools/sbin/so-minion +++ b/salt/manager/tools/sbin/so-minion @@ -631,8 +631,15 @@ if [[ "$OPERATION" == 'add' || "$OPERATION" == 'setup' ]]; then # and they need to wait for ca creation to update the mine updateMine checkMine "network.ip_addrs" + # apply the elasticsearch state to the manager if a new searchnode was added + if [[ "$NODETYPE" == "SEARCHNODE" || "$NODETYPE" == "HEAVYNODE" ]]; then + # calls so-common and set_minionid sets MINIONID to local minion id + set_minionid + salt $MINIONID state.apply elasticsearch queue=True --async + salt $MINIONID state.apply soc queue=True --async + fi # run this async so the cli doesn't wait for a return - salt "$MINION_ID" state.highstate --async + salt "$MINION_ID" state.highstate --async queue=True fi fi From 0086c247296b226dde56c83271ac47664672bb89 Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Fri, 3 Nov 2023 15:21:06 -0400 Subject: [PATCH 346/417] Upgrade Elastic Agent --- salt/manager/tools/sbin/soup | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 263fab7d0..27c6cb98d 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -431,8 +431,7 @@ post_to_2.4.4() { } post_to_2.4.5() { - echo "Regenerating Elastic Agent Installers" - /sbin/so-elastic-agent-gen-installers + echo "Nothing to apply" POSTVERSION=2.4.5 } @@ -449,7 +448,8 @@ post_to_2.4.20() { } post_to_2.4.30() { - echo "Nothing to apply" + echo "Regenerating Elastic Agent Installers" + /sbin/so-elastic-agent-gen-installers POSTVERSION=2.4.30 } @@ -511,7 +511,7 @@ up_to_2.4.4() { } up_to_2.4.5() { - determine_elastic_agent_upgrade + echo "Nothing to do for 2.4.5" INSTALLEDVERSION=2.4.5 } @@ -529,7 +529,7 @@ up_to_2.4.20() { } up_to_2.4.30() { - echo "Nothing to do for 2.4.30" + 
determine_elastic_agent_upgrade INSTALLEDVERSION=2.4.30 } From 7a0b21647f40f8217da0941917c3681237a210c8 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Sat, 4 Nov 2023 10:05:37 -0400 Subject: [PATCH 347/417] disregard false positives --- salt/common/tools/sbin/so-log-check | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check index d3aff6c14..dc84ba5bd 100755 --- a/salt/common/tools/sbin/so-log-check +++ b/salt/common/tools/sbin/so-log-check @@ -117,6 +117,8 @@ if [[ $EXCLUDE_STARTUP_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|search_phase_execution_exception" # server not yet ready (elastalert running searches before ES is ready) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving docker" # Telegraf unable to reach Docker engine, rare EXCLUDED_ERRORS="$EXCLUDED_ERRORS|timeout retrieving container" # Telegraf unable to reach Docker engine, rare + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|error while communicating" # Elasticsearch MS -> HN "sensor" temporarily unavailable + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tls handshake error" # Docker registry container when new node comes onlines fi if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then @@ -140,7 +142,7 @@ if [[ $EXCLUDE_FALSE_POSITIVE_ERRORS == 'Y' ]]; then EXCLUDED_ERRORS="$EXCLUDED_ERRORS|example" # false positive (example test data) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|status 200" # false positive (request successful, contained error string in content) EXCLUDED_ERRORS="$EXCLUDED_ERRORS|app_layer.error" # false positive (suricata 7) in stats.log e.g. 
app_layer.error.imap.parser | Total | 0 - EXCLUDED_ERRORS="$EXCLUDED_ERRORS|Canary running" # false positive (Open Canary logging out blank IP addresses) + EXCLUDED_ERRORS="$EXCLUDED_ERRORS|is not an ip string literal" # false positive (Open Canary logging out blank IP addresses) fi if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then From f6cd35e1435c49d1bca5078409ca3e92a1bb404f Mon Sep 17 00:00:00 2001 From: Josh Brower Date: Mon, 6 Nov 2023 08:03:31 -0500 Subject: [PATCH 348/417] Set execute permissions --- salt/elasticfleet/config.sls | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticfleet/config.sls b/salt/elasticfleet/config.sls index 3ff74efcd..5d5d3e826 100644 --- a/salt/elasticfleet/config.sls +++ b/salt/elasticfleet/config.sls @@ -68,6 +68,7 @@ eapackageupgrade: - source: salt://elasticfleet/tools/sbin_jinja/so-elastic-fleet-package-upgrade - user: 947 - group: 939 + - mode: 755 - template: jinja {% if GLOBALS.role != "so-fleet" %} From 74eda68d8424472460de791d0ec5321ba4a31293 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 Nov 2023 13:16:35 +0000 Subject: [PATCH 349/417] Exit if unable to communicate with Elasticsearch --- salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines | 1 + 1 file changed, 1 insertion(+) diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines index 350ac97c5..f6f56eaf1 100755 --- a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines @@ -37,6 +37,7 @@ if [ ! -f /opt/so/state/espipelines.txt ]; then echo echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. 
\nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" echo + exit 1 fi cd ${ELASTICSEARCH_INGEST_PIPELINES} From c30a0d5b5b0bc43df6a0a4df12c38a5c6c583383 Mon Sep 17 00:00:00 2001 From: Wes Date: Mon, 6 Nov 2023 14:29:01 +0000 Subject: [PATCH 350/417] Better error handling and state file management --- .../tools/sbin/so-elasticsearch-pipelines | 45 +++++++++++-------- 1 file changed, 27 insertions(+), 18 deletions(-) diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines index f6f56eaf1..ff826d2c9 100755 --- a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines @@ -6,7 +6,6 @@ . /usr/sbin/so-common - RETURN_CODE=0 ELASTICSEARCH_HOST=$1 ELASTICSEARCH_PORT=9200 @@ -15,41 +14,51 @@ ELASTICSEARCH_PORT=9200 ELASTICSEARCH_INGEST_PIPELINES="/opt/so/conf/elasticsearch/ingest/" # Wait for ElasticSearch to initialize - if [ ! -f /opt/so/state/espipelines.txt ]; then +echo "State file /opt/so/state/espipelines.txt not found. Running so-elasticsearch-pipelines." echo -n "Waiting for ElasticSearch..." COUNT=0 ELASTICSEARCH_CONNECTED="no" while [[ "$COUNT" -le 240 ]]; do curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" - if [ $? -eq 0 ]; then - ELASTICSEARCH_CONNECTED="yes" - echo "connected!" - break - else - ((COUNT+=1)) - sleep 1 - echo -n "." - fi + if [ $? -eq 0 ]; then + ELASTICSEARCH_CONNECTED="yes" + echo "connected!" + break + else + ((COUNT+=1)) + sleep 1 + echo -n "." + fi done if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then - echo - echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. 
\nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" - echo + echo + echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" + echo exit 1 fi cd ${ELASTICSEARCH_INGEST_PIPELINES} - echo "Loading pipelines..." - for i in .[a-z]* *; do echo $i; RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); echo $RESPONSE; if [[ "$RESPONSE" == *"error"* ]]; then RETURN_CODE=1; fi; done + for i in .[a-z]* *; + do echo $i; + RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); + ERRORS=$(echo $RESPONSE | grep -E "Connection attempt timed out|error"); + if ! [ -z "$ERRORS" ]; then + echo $ERRORS; + RETURN_CODE=1; + fi; + done echo cd - >/dev/null + if [[ "$RETURN_CODE" != "1" ]]; then touch /opt/so/state/espipelines.txt - fi -else + else + echo "Errors were detected. This script will run again during the next application of the state." + fi +else exit $RETURN_CODE fi From b99c7ce76ee769635baeb6fb815430b1cad2c706 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Mon, 6 Nov 2023 11:22:35 -0500 Subject: [PATCH 351/417] improve verbosity of setup logs --- setup/so-setup | 1 + setup/so-verify | 8 +++++++- 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/setup/so-setup b/setup/so-setup index 691d52a2c..a4e67535b 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -774,6 +774,7 @@ if ! [[ -f $install_opt_file ]]; then title "Setting up Elastic Fleet" logCmd "salt-call state.apply elasticfleet.config" if ! 
logCmd so-elastic-fleet-setup; then + error "Failed to run so-elastic-fleet-setup" fail_setup fi if [[ ! $is_import ]]; then diff --git a/setup/so-verify b/setup/so-verify index e4d90b937..3f00cc420 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -61,8 +61,11 @@ log_has_errors() { grep -vE "Running scope as unit" &> "$error_log" if [[ $? -eq 0 ]]; then + # This function succeeds (returns 0) if errors are detected return 0 fi + + # No errors found, return 1 (function failed to find errors) return 1 } @@ -121,7 +124,10 @@ main() { echo "WARNING: Failed setup a while ago" exit_code=1 elif log_has_errors; then - echo "WARNING: Errors detected during setup" + echo "WARNING: Errors detected during setup." + echo "--------- ERRORS ---------" + cat $error_log + echo "--------------------------" exit_code=1 touch /root/failure elif using_iso && cron_error_in_mail_spool; then From cce80eb2fbf56bdcb73f067ee2912c72bb09c78f Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 7 Nov 2023 09:02:48 -0500 Subject: [PATCH 352/417] Change pipeline to 1.8.0 --- .../component/elastic-agent/logs-elastic_agent@package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json index 57dc73c66..d0f4fbee5 100644 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json +++ b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json @@ -5,7 +5,7 @@ "name": "logs" }, "codec": "best_compression", - "default_pipeline": "logs-elastic_agent-1.7.0", + "default_pipeline": "logs-elastic_agent-1.8.0", "mapping": { "total_fields": { "limit": "10000" From 0b4a246ddbaef67effb5e33affdee35cd11deeac Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 7 Nov 2023 16:44:42 +0000 Subject: [PATCH 353/417] State file changes and retry logic --- 
salt/elasticsearch/enabled.sls | 10 +- .../tools/sbin/so-elasticsearch-pipelines | 30 +++- .../so-elasticsearch-templates-load | 154 ++++++++++++++---- 3 files changed, 150 insertions(+), 44 deletions(-) diff --git a/salt/elasticsearch/enabled.sls b/salt/elasticsearch/enabled.sls index fa0f824b4..f7ab7749f 100644 --- a/salt/elasticsearch/enabled.sls +++ b/salt/elasticsearch/enabled.sls @@ -110,7 +110,7 @@ escomponenttemplates: - group: 939 - clean: True - onchanges_in: - - cmd: so-elasticsearch-templates + - file: so-elasticsearch-templates-reload # Auto-generate templates from defaults file {% for index, settings in ES_INDEX_SETTINGS.items() %} @@ -123,7 +123,7 @@ es_index_template_{{index}}: TEMPLATE_CONFIG: {{ settings.index_template }} - template: jinja - onchanges_in: - - cmd: so-elasticsearch-templates + - file: so-elasticsearch-templates-reload {% endif %} {% endfor %} @@ -142,7 +142,7 @@ es_template_{{TEMPLATE.split('.')[0] | replace("/","_") }}: - user: 930 - group: 939 - onchanges_in: - - cmd: so-elasticsearch-templates + - file: so-elasticsearch-templates-reload {% endfor %} {% endif %} @@ -167,6 +167,10 @@ so-elasticsearch-ilm-policy-load: - onchanges: - file: so-elasticsearch-ilm-policy-load-script +so-elasticsearch-templates-reload: + file.absent: + - name: /opt/so/state/estemplates.txt + so-elasticsearch-templates: cmd.run: - name: /usr/sbin/so-elasticsearch-templates-load diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines index ff826d2c9..2ddc5fa52 100755 --- a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines @@ -41,19 +41,31 @@ echo "State file /opt/so/state/espipelines.txt not found. Running so-elasticsear cd ${ELASTICSEARCH_INGEST_PIPELINES} echo "Loading pipelines..." 
- for i in .[a-z]* *; - do echo $i; - RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); - ERRORS=$(echo $RESPONSE | grep -E "Connection attempt timed out|error"); - if ! [ -z "$ERRORS" ]; then - echo $ERRORS; - RETURN_CODE=1; - fi; + for i in .[a-z]* *; + do + echo $i; + SUCCESSFUL="no" + while [[ "$TRYCOUNT" -le 4 ]]; do + RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); + if [ "$RESPONSE" == '{"acknowledged":true}' ]; then + SUCCESSFUL="yes" + break + else + ((TRYCOUNT+=1)) + sleep 5 + echo -n "Attempt $TRYCOUNT/5 unsuccessful..." + fi + done + if ! [ "$SUCCESSFUL" == "yes" ];then + echo -n "Could not load pipeline." + echo -n "$RESPONSE" + exit 1 + fi done echo cd - >/dev/null - + if [[ "$RETURN_CODE" != "1" ]]; then touch /opt/so/state/espipelines.txt else diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index 857da5434..d1e5dc41a 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -7,25 +7,31 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %} -. /usr/sbin/so-common -{% if GLOBALS.role != 'so-heavynode' %} -if [ -f /usr/sbin/so-elastic-fleet-common ]; then - . /usr/sbin/so-elastic-fleet-common -fi -{% endif %} +RETURN_CODE=0 -default_conf_dir=/opt/so/conf +if [ ! -f /opt/so/state/estemplates.txt ]; then + echo "State file /opt/so/state/estemplates.txt not found. Running so-elasticsearch-templates-load." 
-# Define a default directory to load pipelines from -ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/" + . /usr/sbin/so-common -{% if GLOBALS.role == 'so-heavynode' %} -file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json" -{% else %} -file="/usr/sbin/so-elastic-fleet-common" -{% endif %} + {% if GLOBALS.role != 'so-heavynode' %} + if [ -f /usr/sbin/so-elastic-fleet-common ]; then + . /usr/sbin/so-elastic-fleet-common + fi + {% endif %} -if [ -f "$file" ]; then + default_conf_dir=/opt/so/conf + + # Define a default directory to load pipelines from + ELASTICSEARCH_TEMPLATES="$default_conf_dir/elasticsearch/templates/" + + {% if GLOBALS.role == 'so-heavynode' %} + file="/opt/so/conf/elasticsearch/templates/index/so-common-template.json" + {% else %} + file="/usr/sbin/so-elastic-fleet-common" + {% endif %} + + if [ -f "$file" ]; then # Wait for ElasticSearch to initialize echo -n "Waiting for ElasticSearch..." COUNT=0 @@ -59,12 +65,32 @@ if [ -f "$file" ]; then exit 0 fi {% endif %} - set -e cd ${ELASTICSEARCH_TEMPLATES}/component/ecs echo "Loading ECS component templates..." - for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE-mappings"; so-elasticsearch-query _component_template/$TEMPLATE-mappings -d@$i -XPUT 2>/dev/null; echo; done + for i in *; do + TEMPLATE=$(echo $i | cut -d '.' -f1) + echo "$TEMPLATE-mappings" + SUCCESSFUL="no" + while [[ "$TRYCOUNT" -le 4 ]]; do + RESPONSE=$(so-elasticsearch-query _component_template/${TEMPLATE}-mappings -d@$i -XPUT 2>/dev/null); + if [ "$RESPONSE" == '{"acknowledged":true}' ]; then + SUCCESSFUL="yes" + break + else + ((TRYCOUNT+=1)) + sleep 5 + echo -n "Attempt $TRYCOUNT/5 unsuccessful..." + fi + done + if ! [ "$SUCCESSFUL" == "yes" ];then + echo -n "Could not load template." 
+ echo -n "$RESPONSE" + exit 1 + fi + done + echo cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent @@ -74,13 +100,54 @@ if [ -f "$file" ]; then {% else %} component_pattern="*" {% endif %} - for i in $component_pattern; do TEMPLATE=${i::-5}; echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done - + for i in $component_pattern; do + TEMPLATE=${i::-5} + echo "$TEMPLATE" + SUCCESSFUL="no" + while [[ "$TRYCOUNT" -le 4 ]]; do + RESPONSE=$(so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null); + if [ "$RESPONSE" == '{"acknowledged":true}' ]; then + SUCCESSFUL="yes" + break + else + ((TRYCOUNT+=1)) + sleep 5 + echo -n "Attempt $TRYCOUNT/5 unsuccessful..." + fi + done + if ! [ "$SUCCESSFUL" == "yes" ];then + echo -n "Could not load template." + echo -n "$RESPONSE" + exit 1 + fi + done + echo + # Load SO-specific component templates cd ${ELASTICSEARCH_TEMPLATES}/component/so echo "Loading Security Onion component templates..." - for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); echo "$TEMPLATE"; so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; echo; done + for i in *; do + TEMPLATE=$(echo $i | cut -d '.' -f1); + echo "$TEMPLATE" + SUCCESSFUL="no" + while [[ "$TRYCOUNT" -le 4 ]]; do + RESPONSE=$(so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null); + if [ "$RESPONSE" == '{"acknowledged":true}' ]; then + SUCCESSFUL="yes" + break + else + ((TRYCOUNT+=1)) + sleep 5 + echo -n "Attempt $TRYCOUNT/5 unsuccessful..." + fi + done + if ! [ "$SUCCESSFUL" == "yes" ];then + echo -n "Could not load template." 
+ echo -n "$RESPONSE" + exit 1 + fi + done echo # Load SO index templates @@ -94,18 +161,41 @@ if [ -f "$file" ]; then pattern="*" {% endif %} for i in $pattern; do - TEMPLATE=${i::-14}; - echo "$TEMPLATE"; - so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT 2>/dev/null; - echo; + TEMPLATE=${i::-14} + echo "$TEMPLATE" + SUCCESSFUL="no" + while [[ "$TRYCOUNT" -le 4 ]]; do + RESPONSE=$(so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT 2>/dev/null); + if [ "$RESPONSE" == '{"acknowledged":true}' ]; then + SUCCESSFUL="yes" + break + else + ((TRYCOUNT+=1)) + sleep 5 + echo -n "Attempt $TRYCOUNT/5 unsuccessful..." + fi + done + if ! [ "$SUCCESSFUL" == "yes" ];then + echo -n "Could not load template." + echo -n "$RESPONSE" + exit 1 + fi done - echo + else + {% if GLOBALS.role == 'so-heavynode' %} + echo "Common template does not exist. Exiting..." + {% else %} + echo "Elastic Fleet not configured. Exiting..." + {% endif %} + RETURN_CODE=1 + exit 0 + fi + cd - >/dev/null + if [[ "$RETURN_CODE" != "1" ]]; then + touch /opt/so/state/estemplates.txt + else + echo "Errors were detected. This script will run again during the next application of the state." + fi else - {% if GLOBALS.role == 'so-heavynode' %} - echo "Common template does not exist. Exiting..." - {% else %} - echo "Elastic Fleet not configured. Exiting..." 
- {% endif %} - exit 0 + exit $RETURN_CODE fi - cd - >/dev/null From 1dcca0bfd37503f47130e30ff82bb94f08318c74 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 7 Nov 2023 12:17:51 -0500 Subject: [PATCH 354/417] Change pipeline to 1.13.1 --- .../component/elastic-agent/logs-elastic_agent@package.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json index d0f4fbee5..2390705f3 100644 --- a/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json +++ b/salt/elasticsearch/templates/component/elastic-agent/logs-elastic_agent@package.json @@ -5,7 +5,7 @@ "name": "logs" }, "codec": "best_compression", - "default_pipeline": "logs-elastic_agent-1.8.0", + "default_pipeline": "logs-elastic_agent-1.13.1", "mapping": { "total_fields": { "limit": "10000" From 1676c84f9c6ef6b2332c7668dfbe8d1d8f58cac0 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 7 Nov 2023 19:56:50 +0000 Subject: [PATCH 355/417] Use the retry function so-elasticsearch-query --- .../tools/sbin/so-elasticsearch-pipelines | 42 +--- .../so-elasticsearch-templates-load | 198 +++++------------- 2 files changed, 60 insertions(+), 180 deletions(-) diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines index 2ddc5fa52..4afc9bd4d 100755 --- a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines @@ -15,52 +15,16 @@ ELASTICSEARCH_INGEST_PIPELINES="/opt/so/conf/elasticsearch/ingest/" # Wait for ElasticSearch to initialize if [ ! -f /opt/so/state/espipelines.txt ]; then -echo "State file /opt/so/state/espipelines.txt not found. Running so-elasticsearch-pipelines." - + echo "State file /opt/so/state/espipelines.txt not found. Running so-elasticsearch-pipelines." 
echo -n "Waiting for ElasticSearch..." - COUNT=0 - ELASTICSEARCH_CONNECTED="no" - while [[ "$COUNT" -le 240 ]]; do - curl -K /opt/so/conf/elasticsearch/curl.config -k --output /dev/null --silent --head --fail -L https://"$ELASTICSEARCH_HOST":"$ELASTICSEARCH_PORT" - if [ $? -eq 0 ]; then - ELASTICSEARCH_CONNECTED="yes" - echo "connected!" - break - else - ((COUNT+=1)) - sleep 1 - echo -n "." - fi - done - if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then - echo - echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" - echo - exit 1 - fi + retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" cd ${ELASTICSEARCH_INGEST_PIPELINES} echo "Loading pipelines..." for i in .[a-z]* *; do echo $i; - SUCCESSFUL="no" - while [[ "$TRYCOUNT" -le 4 ]]; do - RESPONSE=$(curl -K /opt/so/conf/elasticsearch/curl.config -k -XPUT -L https://${ELASTICSEARCH_HOST}:${ELASTICSEARCH_PORT}/_ingest/pipeline/$i -H 'Content-Type: application/json' -d@$i 2>/dev/null); - if [ "$RESPONSE" == '{"acknowledged":true}' ]; then - SUCCESSFUL="yes" - break - else - ((TRYCOUNT+=1)) - sleep 5 - echo -n "Attempt $TRYCOUNT/5 unsuccessful..." - fi - done - if ! [ "$SUCCESSFUL" == "yes" ];then - echo -n "Could not load pipeline." 
- echo -n "$RESPONSE" - exit 1 - fi + retry 5 5 "so-elasticsearch-query _ingest/pipeline/$i -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load pipeline: $i" done echo diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index d1e5dc41a..c0d4f9cba 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -32,155 +32,71 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then {% endif %} if [ -f "$file" ]; then - # Wait for ElasticSearch to initialize - echo -n "Waiting for ElasticSearch..." - COUNT=0 - ELASTICSEARCH_CONNECTED="no" - while [[ "$COUNT" -le 240 ]]; do - so-elasticsearch-query / -k --output /dev/null --silent --head --fail - if [ $? -eq 0 ]; then - ELASTICSEARCH_CONNECTED="yes" - echo "connected!" - break - else - ((COUNT+=1)) - sleep 1 - echo -n "." - fi - done - if [ "$ELASTICSEARCH_CONNECTED" == "no" ]; then - echo - echo -e "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" - echo - exit 1 - fi + # Wait for ElasticSearch to initialize + echo -n "Waiting for ElasticSearch..." + retry 240 1 "so-elasticsearch-query / -k --output /dev/null --silent --head --fail" || fail "Connection attempt timed out. Unable to connect to ElasticSearch. \nPlease try: \n -checking log(s) in /var/log/elasticsearch/\n -running 'sudo docker ps' \n -running 'sudo so-elastic-restart'" + {% if GLOBALS.role != 'so-heavynode' %} + SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}') + INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} ) + if [ "$INSTALLED" != "installed" ]; then + echo + echo "Packages not yet installed." 
+ echo + exit 0 + fi + {% endif %} - {% if GLOBALS.role != 'so-heavynode' %} - SESSIONCOOKIE=$(curl -s -K /opt/so/conf/elasticsearch/curl.config -c - -X GET http://localhost:5601/ | grep sid | awk '{print $7}') - INSTALLED=$(elastic_fleet_package_is_installed {{ SUPPORTED_PACKAGES[0] }} ) - if [ "$INSTALLED" != "installed" ]; then - echo - echo "Packages not yet installed." - echo - exit 0 - fi - {% endif %} + cd ${ELASTICSEARCH_TEMPLATES}/component/ecs - cd ${ELASTICSEARCH_TEMPLATES}/component/ecs + echo "Loading ECS component templates..." + for i in *; do + TEMPLATE=$(echo $i | cut -d '.' -f1) + echo "$TEMPLATE-mappings" + retry 5 5 "so-elasticsearch-query _component_template/${TEMPLATE}-mappings -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE-mappings" + done + echo - echo "Loading ECS component templates..." - for i in *; do - TEMPLATE=$(echo $i | cut -d '.' -f1) - echo "$TEMPLATE-mappings" - SUCCESSFUL="no" - while [[ "$TRYCOUNT" -le 4 ]]; do - RESPONSE=$(so-elasticsearch-query _component_template/${TEMPLATE}-mappings -d@$i -XPUT 2>/dev/null); - if [ "$RESPONSE" == '{"acknowledged":true}' ]; then - SUCCESSFUL="yes" - break - else - ((TRYCOUNT+=1)) - sleep 5 - echo -n "Attempt $TRYCOUNT/5 unsuccessful..." - fi - done - if ! [ "$SUCCESSFUL" == "yes" ];then - echo -n "Could not load template." - echo -n "$RESPONSE" - exit 1 - fi - done - echo + cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent - cd ${ELASTICSEARCH_TEMPLATES}/component/elastic-agent - - echo "Loading Elastic Agent component templates..." 
- {% if GLOBALS.role == 'so-heavynode' %} - component_pattern="so-*" - {% else %} - component_pattern="*" - {% endif %} - for i in $component_pattern; do - TEMPLATE=${i::-5} - echo "$TEMPLATE" - SUCCESSFUL="no" - while [[ "$TRYCOUNT" -le 4 ]]; do - RESPONSE=$(so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null); - if [ "$RESPONSE" == '{"acknowledged":true}' ]; then - SUCCESSFUL="yes" - break - else - ((TRYCOUNT+=1)) - sleep 5 - echo -n "Attempt $TRYCOUNT/5 unsuccessful..." - fi - done - if ! [ "$SUCCESSFUL" == "yes" ];then - echo -n "Could not load template." - echo -n "$RESPONSE" - exit 1 - fi - done - echo + echo "Loading Elastic Agent component templates..." + {% if GLOBALS.role == 'so-heavynode' %} + component_pattern="so-*" + {% else %} + component_pattern="*" + {% endif %} + for i in $component_pattern; do + TEMPLATE=${i::-5} + echo "$TEMPLATE" + retry 5 5 "so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + done + echo - # Load SO-specific component templates - cd ${ELASTICSEARCH_TEMPLATES}/component/so + # Load SO-specific component templates + cd ${ELASTICSEARCH_TEMPLATES}/component/so - echo "Loading Security Onion component templates..." - for i in *; do - TEMPLATE=$(echo $i | cut -d '.' -f1); - echo "$TEMPLATE" - SUCCESSFUL="no" - while [[ "$TRYCOUNT" -le 4 ]]; do - RESPONSE=$(so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT 2>/dev/null); - if [ "$RESPONSE" == '{"acknowledged":true}' ]; then - SUCCESSFUL="yes" - break - else - ((TRYCOUNT+=1)) - sleep 5 - echo -n "Attempt $TRYCOUNT/5 unsuccessful..." - fi - done - if ! [ "$SUCCESSFUL" == "yes" ];then - echo -n "Could not load template." - echo -n "$RESPONSE" - exit 1 - fi - done - echo + echo "Loading Security Onion component templates..." + for i in *; do + TEMPLATE=$(echo $i | cut -d '.' 
-f1); + echo "$TEMPLATE" + retry 5 5 "so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + done + echo - # Load SO index templates - cd ${ELASTICSEARCH_TEMPLATES}/index + # Load SO index templates + cd ${ELASTICSEARCH_TEMPLATES}/index - echo "Loading Security Onion index templates..." - shopt -s extglob - {% if GLOBALS.role == 'so-heavynode' %} - pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)" - {% else %} - pattern="*" - {% endif %} - for i in $pattern; do - TEMPLATE=${i::-14} - echo "$TEMPLATE" - SUCCESSFUL="no" - while [[ "$TRYCOUNT" -le 4 ]]; do - RESPONSE=$(so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT 2>/dev/null); - if [ "$RESPONSE" == '{"acknowledged":true}' ]; then - SUCCESSFUL="yes" - break - else - ((TRYCOUNT+=1)) - sleep 5 - echo -n "Attempt $TRYCOUNT/5 unsuccessful..." - fi - done - if ! [ "$SUCCESSFUL" == "yes" ];then - echo -n "Could not load template." - echo -n "$RESPONSE" - exit 1 - fi - done + echo "Loading Security Onion index templates..." + shopt -s extglob + {% if GLOBALS.role == 'so-heavynode' %} + pattern="!(*1password*|*aws*|*azure*|*cloudflare*|*elastic_agent*|*fim*|*github*|*google*|*osquery*|*system*|*windows*)" + {% else %} + pattern="*" + {% endif %} + for i in $pattern; do + TEMPLATE=${i::-14} + echo "$TEMPLATE" + retry 5 5 "so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + done else {% if GLOBALS.role == 'so-heavynode' %} echo "Common template does not exist. Exiting..." 
From 7772657b4bc9342a2800560f4919fbff0e4d03d2 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 7 Nov 2023 21:06:35 +0000 Subject: [PATCH 356/417] Remove RETURN_CODE --- .../tools/sbin/so-elasticsearch-pipelines | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines index 4afc9bd4d..fa0e57fc4 100755 --- a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines @@ -6,7 +6,6 @@ . /usr/sbin/so-common -RETURN_CODE=0 ELASTICSEARCH_HOST=$1 ELASTICSEARCH_PORT=9200 @@ -30,11 +29,4 @@ if [ ! -f /opt/so/state/espipelines.txt ]; then cd - >/dev/null - if [[ "$RETURN_CODE" != "1" ]]; then - touch /opt/so/state/espipelines.txt - else - echo "Errors were detected. This script will run again during the next application of the state." - fi -else - exit $RETURN_CODE -fi + touch /opt/so/state/espipelines.txt From 570624da7eb806bc01a1f7b4aec6cebeaa262be0 Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 7 Nov 2023 21:09:29 +0000 Subject: [PATCH 357/417] Remove RETURN_CODE --- .../sbin_jinja/so-elasticsearch-templates-load | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index c0d4f9cba..d870ada90 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -7,8 +7,6 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %} -RETURN_CODE=0 - if [ ! -f /opt/so/state/estemplates.txt ]; then echo "State file /opt/so/state/estemplates.txt not found. Running so-elasticsearch-templates-load." 
@@ -103,15 +101,9 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then {% else %} echo "Elastic Fleet not configured. Exiting..." {% endif %} - RETURN_CODE=1 exit 0 fi + cd - >/dev/null - if [[ "$RETURN_CODE" != "1" ]]; then - touch /opt/so/state/estemplates.txt - else - echo "Errors were detected. This script will run again during the next application of the state." - fi -else - exit $RETURN_CODE -fi + + touch /opt/so/state/estemplates.txt From 69ec1987af24a2e36a24e649e667f7ad8dc0f3f6 Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 7 Nov 2023 17:28:37 -0500 Subject: [PATCH 358/417] Fix if statement --- salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines index fa0e57fc4..71c40c1ca 100755 --- a/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-pipelines @@ -28,5 +28,5 @@ if [ ! -f /opt/so/state/espipelines.txt ]; then echo cd - >/dev/null - touch /opt/so/state/espipelines.txt +fi From 749e22e4b9502205bbbc9a43b3577ab186c76c3f Mon Sep 17 00:00:00 2001 From: weslambert Date: Tue, 7 Nov 2023 17:29:38 -0500 Subject: [PATCH 359/417] Fix if statement --- .../tools/sbin_jinja/so-elasticsearch-templates-load | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index d870ada90..985630402 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -105,5 +105,5 @@ if [ ! 
-f /opt/so/state/estemplates.txt ]; then fi cd - >/dev/null - touch /opt/so/state/estemplates.txt +fi From de9f9549afa6ef300982690a9beadfd01a48ea4e Mon Sep 17 00:00:00 2001 From: Wes Date: Tue, 7 Nov 2023 23:55:03 +0000 Subject: [PATCH 360/417] Extend template loading to 24 attempts and a total of ~2 minutes --- .../tools/sbin_jinja/so-elasticsearch-templates-load | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index 985630402..a25d49390 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -50,7 +50,7 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1) echo "$TEMPLATE-mappings" - retry 5 5 "so-elasticsearch-query _component_template/${TEMPLATE}-mappings -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE-mappings" + retry 24 5 "so-elasticsearch-query _component_template/${TEMPLATE}-mappings -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE-mappings" done echo @@ -65,7 +65,7 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then for i in $component_pattern; do TEMPLATE=${i::-5} echo "$TEMPLATE" - retry 5 5 "so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + retry 24 5 "so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" done echo @@ -76,7 +76,7 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then for i in *; do TEMPLATE=$(echo $i | cut -d '.' 
-f1); echo "$TEMPLATE" - retry 5 5 "so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + retry 24 5 "so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" done echo @@ -93,7 +93,7 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then for i in $pattern; do TEMPLATE=${i::-14} echo "$TEMPLATE" - retry 5 5 "so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + retry 24 5 "so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" done else {% if GLOBALS.role == 'so-heavynode' %} From b46e86c39b0c1730c5051d86e39156915f7ba31f Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 8 Nov 2023 02:29:09 +0000 Subject: [PATCH 361/417] Extend index template loading to 60 attempts and a total of ~5 minutes --- .../tools/sbin_jinja/so-elasticsearch-templates-load | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index a25d49390..c3c5ff69f 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -93,7 +93,7 @@ if [ ! 
-f /opt/so/state/estemplates.txt ]; then for i in $pattern; do TEMPLATE=${i::-14} echo "$TEMPLATE" - retry 24 5 "so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + retry 60 5 "so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" done else {% if GLOBALS.role == 'so-heavynode' %} From 653fda124f13303a0111c8c126ea2d7e8b6a42f9 Mon Sep 17 00:00:00 2001 From: Wes Date: Wed, 8 Nov 2023 13:02:17 +0000 Subject: [PATCH 362/417] Check expected with retry --- .../tools/sbin_jinja/so-elasticsearch-templates-load | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index c3c5ff69f..268b6138d 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -93,7 +93,7 @@ if [ ! 
-f /opt/so/state/estemplates.txt ]; then for i in $pattern; do TEMPLATE=${i::-14} echo "$TEMPLATE" - retry 60 5 "so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + retry 60 5 "so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT" "{\"acknowledged\":true}" || fail "Could not load template: $TEMPLATE" done else {% if GLOBALS.role == 'so-heavynode' %} From d256be3eb3ba768872c83670995e50f4a8191a8e Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 8 Nov 2023 10:32:11 -0500 Subject: [PATCH 363/417] allow template loads to partially succeed only on the initial attempt --- salt/common/tools/sbin/so-common | 6 +- .../so-elasticsearch-templates-load | 64 +++++++++++++++---- 2 files changed, 57 insertions(+), 13 deletions(-) diff --git a/salt/common/tools/sbin/so-common b/salt/common/tools/sbin/so-common index 8089db28b..e09d2c8ae 100755 --- a/salt/common/tools/sbin/so-common +++ b/salt/common/tools/sbin/so-common @@ -397,6 +397,10 @@ retry() { echo "" echo "$output" echo "" + if [[ $exitcode -eq 0 ]]; then + echo "Forcing exit code to 1" + exitcode=1 + fi fi elif [ -n "$failedOutput" ]; then if [[ "$output" =~ "$failedOutput" ]]; then @@ -405,7 +409,7 @@ retry() { echo "$output" echo "" if [[ $exitcode -eq 0 ]]; then - echo "The exitcode was 0, but we are setting to 1 since we found $failedOutput in the output." 
+ echo "Forcing exit code to 1" exitcode=1 fi else diff --git a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load index 268b6138d..33caff435 100755 --- a/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load +++ b/salt/elasticsearch/tools/sbin_jinja/so-elasticsearch-templates-load @@ -7,8 +7,42 @@ {% from 'vars/globals.map.jinja' import GLOBALS %} {%- set SUPPORTED_PACKAGES = salt['pillar.get']('elasticfleet:packages', default=ELASTICFLEETDEFAULTS.elasticfleet.packages, merge=True) %} -if [ ! -f /opt/so/state/estemplates.txt ]; then - echo "State file /opt/so/state/estemplates.txt not found. Running so-elasticsearch-templates-load." +STATE_FILE_INITIAL=/opt/so/state/estemplates_initial_load_attempt.txt +STATE_FILE_SUCCESS=/opt/so/state/estemplates.txt + +if [[ -f $STATE_FILE_INITIAL ]]; then + # The initial template load has already run. As this is a subsequent load, all dependencies should + # already be satisified. Therefore, immediately exit/abort this script upon any template load failure + # since this is an unrecoverable failure. + should_exit_on_failure=1 +else + # This is the initial template load, and there likely are some components not yet setup in Elasticsearch. + # Therefore load as many templates as possible at this time and if an error occurs proceed to the next + # template. But if at least one template fails to load do not mark the templates as having been loaded. + # This will allow the next load to resume the load of the templates that failed to load initially. + should_exit_on_failure=0 + echo "This is the initial template load" +fi + +load_failures=0 + +load_template() { + uri=$1 + file=$2 + + echo "Loading template file $i" + if ! 
retry 3 5 "so-elasticsearch-query $uri -d@$file -XPUT" "{\"acknowledged\":true}"; then + if [[ $should_exit_on_failure -eq 1 ]]; then + fail "Could not load template file: $file" + else + load_failures=$((load_failures+1)) + echo "Incremented load failure counter: $load_failures" + fi + fi +} + +if [ ! -f $STATE_FILE_SUCCESS ]; then + echo "State file $STATE_FILE_SUCCESS not found. Running so-elasticsearch-templates-load." . /usr/sbin/so-common @@ -44,13 +78,14 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then fi {% endif %} + touch $STATE_FILE_INITIAL + cd ${ELASTICSEARCH_TEMPLATES}/component/ecs echo "Loading ECS component templates..." for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1) - echo "$TEMPLATE-mappings" - retry 24 5 "so-elasticsearch-query _component_template/${TEMPLATE}-mappings -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE-mappings" + load_template "_component_template/${TEMPLATE}-mappings" "$i" done echo @@ -64,8 +99,7 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then {% endif %} for i in $component_pattern; do TEMPLATE=${i::-5} - echo "$TEMPLATE" - retry 24 5 "so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + load_template "_component_template/$TEMPLATE" "$i" done echo @@ -75,8 +109,7 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then echo "Loading Security Onion component templates..." for i in *; do TEMPLATE=$(echo $i | cut -d '.' -f1); - echo "$TEMPLATE" - retry 24 5 "so-elasticsearch-query _component_template/$TEMPLATE -d@$i -XPUT | grep '{\"acknowledged\":true}'" || fail "Could not load template: $TEMPLATE" + load_template "_component_template/$TEMPLATE" "$i" done echo @@ -92,9 +125,8 @@ if [ ! 
-f /opt/so/state/estemplates.txt ]; then {% endif %} for i in $pattern; do TEMPLATE=${i::-14} - echo "$TEMPLATE" - retry 60 5 "so-elasticsearch-query _index_template/$TEMPLATE -d@$i -XPUT" "{\"acknowledged\":true}" || fail "Could not load template: $TEMPLATE" - done + load_template "_index_template/$TEMPLATE" "$i" + done else {% if GLOBALS.role == 'so-heavynode' %} echo "Common template does not exist. Exiting..." @@ -105,5 +137,13 @@ if [ ! -f /opt/so/state/estemplates.txt ]; then fi cd - >/dev/null - touch /opt/so/state/estemplates.txt + + if [[ $load_failures -eq 0 ]]; then + echo "All template loaded successfully" + touch $STATE_FILE_SUCCESS + else + echo "Encountered $load_failures templates that were unable to load, likely due to missing dependencies that will be available later; will retry on next highstate" + fi +else + echo "Templates already loaded" fi From f46aef16114b1f4c86662370d81e3393b8199917 Mon Sep 17 00:00:00 2001 From: m0duspwnens Date: Wed, 8 Nov 2023 11:23:19 -0500 Subject: [PATCH 364/417] remove comments from BPFs --- salt/bpf/macros.jinja | 10 ++++++++++ salt/bpf/pcap.map.jinja | 3 +++ salt/bpf/suricata.map.jinja | 3 +++ salt/bpf/zeek.map.jinja | 3 +++ 4 files changed, 19 insertions(+) create mode 100644 salt/bpf/macros.jinja diff --git a/salt/bpf/macros.jinja b/salt/bpf/macros.jinja new file mode 100644 index 000000000..38cb8ed0d --- /dev/null +++ b/salt/bpf/macros.jinja @@ -0,0 +1,10 @@ +{% macro remove_comments(bpfmerged, app) %} + +{# remove comments from the bpf #} +{% for bpf in bpfmerged[app] %} +{% if bpf.strip().startswith('#') %} +{% do bpfmerged[app].pop(loop.index0) %} +{% endif %} +{% endfor %} + +{% endmacro %} diff --git a/salt/bpf/pcap.map.jinja b/salt/bpf/pcap.map.jinja index a160f2f7a..c1d7562cc 100644 --- a/salt/bpf/pcap.map.jinja +++ b/salt/bpf/pcap.map.jinja @@ -1,4 +1,7 @@ {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} +{% import 
'bpf/macros.jinja' as MACROS %} + +{{ MACROS.remove_comments(BPFMERGED, 'pcap') }} {% set PCAPBPF = BPFMERGED.pcap %} diff --git a/salt/bpf/suricata.map.jinja b/salt/bpf/suricata.map.jinja index bec763783..fe4adb663 100644 --- a/salt/bpf/suricata.map.jinja +++ b/salt/bpf/suricata.map.jinja @@ -1,4 +1,7 @@ {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} +{% import 'bpf/macros.jinja' as MACROS %} + +{{ MACROS.remove_comments(BPFMERGED, 'suricata') }} {% set SURICATABPF = BPFMERGED.suricata %} diff --git a/salt/bpf/zeek.map.jinja b/salt/bpf/zeek.map.jinja index 1bfb6799e..fdcc5e99f 100644 --- a/salt/bpf/zeek.map.jinja +++ b/salt/bpf/zeek.map.jinja @@ -1,4 +1,7 @@ {% import_yaml 'bpf/defaults.yaml' as BPFDEFAULTS %} {% set BPFMERGED = salt['pillar.get']('bpf', BPFDEFAULTS.bpf, merge=True) %} +{% import 'bpf/macros.jinja' as MACROS %} + +{{ MACROS.remove_comments(BPFMERGED, 'zeek') }} {% set ZEEKBPF = BPFMERGED.zeek %} From 3701c1d847eb6b6e7f0b5cb22d4c7b9441641cf3 Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 8 Nov 2023 11:50:56 -0500 Subject: [PATCH 365/417] ignore retry logging --- setup/so-verify | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup/so-verify b/setup/so-verify index e4d90b937..dc84b1e88 100755 --- a/setup/so-verify +++ b/setup/so-verify @@ -37,7 +37,7 @@ log_has_errors() { # Failed to restart snapd.mounts-pre.target: Operation refused, unit snapd.mounts-pre.target # may be requested by dependency only (it is configured to refuse manual start/stop). - # Exit code 100 failure is likely apt-get running in the background, we wait for it to unlock. + # Command failed with exit code is output during retry loops. 
grep -E "FAILED|Failed|failed|ERROR|Result: False|Error is not recoverable" "$setup_log" | \ grep -vE "The Salt Master has cached the public key for this node" | \ @@ -57,7 +57,7 @@ log_has_errors() { grep -vE "Login Failed Details" | \ grep -vE "response from daemon: unauthorized" | \ grep -vE "Reading first line of patchfile" | \ - grep -vE "Command failed with exit code 100; will retry" | \ + grep -vE "Command failed with exit code" | \ grep -vE "Running scope as unit" &> "$error_log" if [[ $? -eq 0 ]]; then From 36098e631472760f55011f71856afc5b13f534ba Mon Sep 17 00:00:00 2001 From: weslambert Date: Wed, 8 Nov 2023 14:32:58 -0500 Subject: [PATCH 366/417] Remove template files --- salt/manager/tools/sbin/soup | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 27c6cb98d..7c3c85256 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -530,7 +530,8 @@ up_to_2.4.20() { up_to_2.4.30() { determine_elastic_agent_upgrade - + rm -f /opt/so/state/estemplates*.txt + INSTALLEDVERSION=2.4.30 } From 33a8ef1568bbbbe859a934ef81953d5d93915c2c Mon Sep 17 00:00:00 2001 From: Jason Ertel Date: Wed, 8 Nov 2023 18:24:23 -0500 Subject: [PATCH 367/417] add yaml helper script; refactor python testing --- pyci.sh | 22 ++++ .../files/analyzers/pytest.ini => pytest.ini | 0 salt/manager/tools/sbin/so-firewall | 18 +--- salt/manager/tools/sbin/so-yaml.py | 97 ++++++++++++++++++ salt/manager/tools/sbin/so-yaml_test.py | 59 +++++++++++ salt/sensoroni/files/analyzers/build.sh | 19 +--- ...linux_2_12_x86_64.manylinux2010_x86_64.whl | Bin 661819 -> 0 bytes .../dnspython-2.3.0-py3-none-any.whl | Bin 283679 -> 0 bytes 8 files changed, 183 insertions(+), 32 deletions(-) create mode 100755 pyci.sh rename salt/sensoroni/files/analyzers/pytest.ini => pytest.ini (100%) create mode 100755 salt/manager/tools/sbin/so-yaml.py create mode 100644 salt/manager/tools/sbin/so-yaml_test.py delete mode 
100644 salt/sensoroni/files/analyzers/spamhaus/source-packages/PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl delete mode 100644 salt/sensoroni/files/analyzers/spamhaus/source-packages/dnspython-2.3.0-py3-none-any.whl diff --git a/pyci.sh b/pyci.sh new file mode 100755 index 000000000..b0e48cf98 --- /dev/null +++ b/pyci.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +if [[ $# -ne 1 ]]; then + echo "Usage: $0 " + echo "Runs tests on all *_test.py files in the given directory." + exit 1 +fi + +HOME_DIR=$(dirname "$0") +TARGET_DIR=${1:-.} + +PATH=$PATH:/usr/local/bin + +if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then + echo "Missing dependencies. Consider running the following command:" + echo " python -m pip install flake8 pytest pytest-cov" + exit 1 +fi + +pip install pytest pytest-cov +flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini" +python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR" \ No newline at end of file diff --git a/salt/sensoroni/files/analyzers/pytest.ini b/pytest.ini similarity index 100% rename from salt/sensoroni/files/analyzers/pytest.ini rename to pytest.ini diff --git a/salt/manager/tools/sbin/so-firewall b/salt/manager/tools/sbin/so-firewall index 6c47a3719..742427518 100755 --- a/salt/manager/tools/sbin/so-firewall +++ b/salt/manager/tools/sbin/so-firewall @@ -1,19 +1,9 @@ #!/usr/bin/env python3 -# Copyright 2014-2023 Security Onion Solutions, LLC -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. 
-# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. import os import subprocess diff --git a/salt/manager/tools/sbin/so-yaml.py b/salt/manager/tools/sbin/so-yaml.py new file mode 100755 index 000000000..c8f102770 --- /dev/null +++ b/salt/manager/tools/sbin/so-yaml.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 + +# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one +# or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at +# https://securityonion.net/license; you may not use this file except in compliance with the +# Elastic License 2.0. + +import os +import sys +import time +import yaml + +lockFile = "/tmp/so-yaml.lock" + + +def showUsage(args): + print('Usage: {} [ARGS...]'.format(sys.argv[0])) + print(' General commands:') + print(' remove - Removes a yaml top-level key, if it exists. Requires KEY arg.') + print(' help - Prints this usage information.') + print('') + print(' Where:') + print(' YAML_FILE - Path to the file that will be modified. Ex: /opt/so/conf/service/conf.yaml') + print(' KEY - Top level key only, does not support dot-notations for nested keys at this time. 
Ex: level1') + sys.exit(1) + + +def loadYaml(filename): + file = open(filename, "r") + content = file.read() + return yaml.safe_load(content) + + +def writeYaml(filename, content): + file = open(filename, "w") + return yaml.dump(content, file) + + +def remove(args): + if len(args) != 2: + print('Missing filename or key arg', file=sys.stderr) + showUsage(None) + return + + filename = args[0] + content = loadYaml(filename) + + content.pop(args[1], None) + + writeYaml(filename, content) + return 0 + + +def main(): + args = sys.argv[1:] + + if len(args) < 1: + showUsage(None) + return + + commands = { + "help": showUsage, + "remove": remove, + } + + code = 1 + + try: + lockAttempts = 0 + maxAttempts = 30 + while lockAttempts < maxAttempts: + lockAttempts = lockAttempts + 1 + try: + f = open(lockFile, "x") + f.close() + break + except Exception: + if lockAttempts == 1: + print("Waiting for lock file to be released from another process...") + time.sleep(2) + + if lockAttempts == maxAttempts: + print("Lock file (" + lockFile + ") could not be created; proceeding without lock.") + + cmd = commands.get(args[0], showUsage) + code = cmd(args[1:]) + finally: + try: + os.remove(lockFile) + except Exception: + print("Lock file (" + lockFile + ") already removed") + + sys.exit(code) + + +if __name__ == "__main__": + main() diff --git a/salt/manager/tools/sbin/so-yaml_test.py b/salt/manager/tools/sbin/so-yaml_test.py new file mode 100644 index 000000000..3505e8d30 --- /dev/null +++ b/salt/manager/tools/sbin/so-yaml_test.py @@ -0,0 +1,59 @@ +from io import StringIO +import sys +from unittest.mock import patch, MagicMock +import unittest +import importlib +soyaml = importlib.import_module("so-yaml") + + +class TestRemove(unittest.TestCase): + + def test_main_missing_input(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd"] + soyaml.main() + sysmock.assert_called_once_with(1) + 
self.assertIn(mock_stdout.getvalue(), "Usage:") + + def test_main_help(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + sys.argv = ["cmd", "help"] + soyaml.main() + sysmock.assert_called() + self.assertIn(mock_stdout.getvalue(), "Usage:") + + def test_remove(self): + filename = "/tmp/so-yaml_test-remove.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: abc }, key2: false}") + file.close() + + soyaml.remove([filename, "key1"]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "key2: false\n" + self.assertEqual(actual, expected) + + def test_remove_missing_args(self): + with patch('sys.exit', new=MagicMock()) as sysmock: + with patch('sys.stderr', new=StringIO()) as mock_stdout: + filename = "/tmp/so-yaml_test-remove.yaml" + file = open(filename, "w") + file.write("{key1: { child1: 123, child2: abc }, key2: false}") + file.close() + + soyaml.remove([filename]) + + file = open(filename, "r") + actual = file.read() + file.close() + + expected = "{key1: { child1: 123, child2: abc }, key2: false}" + self.assertEqual(actual, expected) + sysmock.assert_called_once_with(1) + self.assertIn(mock_stdout.getvalue(), "Missing filename or key arg\n") diff --git a/salt/sensoroni/files/analyzers/build.sh b/salt/sensoroni/files/analyzers/build.sh index 386cc92d5..65334cc0b 100755 --- a/salt/sensoroni/files/analyzers/build.sh +++ b/salt/sensoroni/files/analyzers/build.sh @@ -3,23 +3,6 @@ COMMAND=$1 SENSORONI_CONTAINER=${SENSORONI_CONTAINER:-so-sensoroni} -function ci() { - HOME_DIR=$(dirname "$0") - TARGET_DIR=${1:-.} - - PATH=$PATH:/usr/local/bin - - if ! which pytest &> /dev/null || ! which flake8 &> /dev/null ; then - echo "Missing dependencies. 
Consider running the following command:" - echo " python -m pip install flake8 pytest pytest-cov" - exit 1 - fi - - pip install pytest pytest-cov - flake8 "$TARGET_DIR" "--config=${HOME_DIR}/pytest.ini" - python3 -m pytest "--cov-config=${HOME_DIR}/pytest.ini" "--cov=$TARGET_DIR" --doctest-modules --cov-report=term --cov-fail-under=100 "$TARGET_DIR" -} - function download() { ANALYZERS=$1 if [[ $ANALYZERS = "all" ]]; then @@ -36,5 +19,5 @@ function download() { if [[ "$COMMAND" == "download" ]]; then download "$2" else - ci + ../../../../pyci.sh $@ fi diff --git a/salt/sensoroni/files/analyzers/spamhaus/source-packages/PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl b/salt/sensoroni/files/analyzers/spamhaus/source-packages/PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl deleted file mode 100644 index d2b6c37f9d432b8001f7034cd4a314a7a28cfc3e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 661819 zcmaI71CS^|kS08~ZQHizjcwbuZQHhOTW@UJw*9}`y}P*Gi2Xadsym{yszeH8b zO96wR0Q|2bIa5aIKW_ea1@rGJ@1Y?mBTZ**Vdz9J|36Zq{$EO3HaZ46V+$u|S_@lK zyZ@giVE>xvc^KGO|Nqc{{-gOXBoP0>V;DSYzw)0v{F1wm zR$wV;C8x)w|ItWIQOX`eNl3RTiqkQ)E`cIHgPqvrgL_8j;fQM zn%)OO{%;)#Vm=g*0|Nl8fCB*F{!bmrh$svG5mXk;QQ3^$WJB;dRht5@KC%ufuJH$R z=%KV6mW_5tCWvEUO)Wd2C27$;zT{4Bo}tnpaM1&i$aXSyH+63xC9%pdzK0YhF{6Gf z=|;OP9)M!oK7IEjouQ_mG&9`9A{JH?)CHm!9tK!g@efrzcx*osm1Hu6x$P=N&fihDC!v2tf?tlQ-&nSqRJQSE`mbF9(CXN*0EqJ-m~UiwQ_ zWECW2D-xv9yl?kQ(&gq86XFP17QKlF3axfH+e*f!Np`}HI%PwSra3C^C1iUFyHnuG zrv|G<*Bv9Tat8#pXY8^m#dH{wGYV;IfxLlbmEOi$a4b~Gj48KA;))Z9bMM;~+;J+q z%h-G|c3|KE@q5*ryx)c5OI(i%wjdrvssZRd?^r@p1A+Kkj$5?woZly~`y>RcF1;mt)*N zo~-6U;3fz*_!jOty;#f0C*vITO1WAZzuMl+)4_zJMbEdFmb&&B8BiLi=66IE$E@=u zk~wYHqTTJ$=0p6)ZU{wC$kjogovh;gvOdLIz$6fhm^C#{?Bzt*syM{|>VumtektN= zXE=-=#ub#D?;b&gU>6^qHxf+>!!DR`w+LE*#|JQ3DB7R55&kY0V^nm|ksRl4_4xU( 
z11LT%Wh-8I%`06owSGmcHPWdasgEnH{<(Uj_J#KlS$5LcSCJlcz$3P*i{b;YL~uHX zZm8$qJY!_9Kx=|wp_mf%`J#i|3PK`WV{N)95YgrHHCv6H+UPjuB-Ch zm)wyD-J}bv*$eSC(B5W(`bG%bSnM`3Y_F6JG2?>RQ`vqCt7t=Xe=eTYLmP?{vadhmAg&dWHJv zxehELUU?ezU*}s+_}URcz)jr1h%I_)+i%Km%(!|{6bSMDX`Ca7aqF_D@(S9+lDyS1 zkva<6P#GF`5+72KzeqC;CN!GmKM`WKdibO zz^;bFtiFfQdSBfRC%hD+V$r51(&1pexx-J*+=2B8OElf8zSSj&?dWzm1=8$M&j;uD@mao1v6Fo2WnCSVUv>2-c8vf|wq5)1 zhqr6^EV>lqeM5PKdp-q<^e$c1hKvWLQTd@lPUsItchCJ(pX!-g$3h@NP7x#lp#o0d(gA?T=)}* zZ$qkwDB}UoB2*k=sq%-B3oSGvpju7;ln+9%*g$a?7_{5d@)A6+%7; zLPQ>s+Q3xG1MaujZ@~X5$<1S_!%qLC82X>B!2X{kSy4n-PVtY+^0;G>I0D#S7x9No z|FoiY7$<>i0EvLVKXkyl8wdn~9KU7QvXP9l9*6HyUWQ7^F7HeJz!CQY9{M2&y3NLS z_us&LK_fgvkoUMk!VszUG0Yx}-V)pf-0-`0Srd{JX|DDfd{3N-bJ!=A;DV(%_HVhP zrDwsMDT!~sn#Cl%4W`&pRhI(ww`|^XeVEj|C;6lFy_`s2e z>JdJLq6D?l#1DWD@~nXl-E>(3#9?DOT>&?ACA$O<8L=vkAO;Y^-$6?#a-7*gCEA+G zphJ~%=Vs?rWolU?=B}UE9?_f&3@J;*QaLV;iXSS}cddrf{v2t%K;LfA*ZK-SFu{2= zC4}zR*V8o=3W0ijyF5j2s}JM1 zlQT`0_m;LdO}P#k!?7rjEWyhh@2Q=ZZo^%Rc2y;rU~Y&ahl|OIE(Tu*Yw}?B;|=A5 z45et3sc)h8P`@u#N_^~S~+;j*M zjC&bXl%?mOR88JcHdKgdFk<{9#$p-1NMIO4qo6|-FaXMg( z2lcLoZ-%mHHQLckq6=;}l5Un%t8k<(X2ZcD=AM*3hS>1lnO#b^qe~b#Ud5-ai>G_l zKe{EfJRCx$K=Vk@N-16%VlZMEX4$4CxP9Qo2m(}MG87>8R`}8CpmEx_xA!AdRwU|%4z>JI7_b_5nNUImR zvV!#xh2)bCcE!t(>|vRQe&V&XTh?u5kR70w2_p`$D&nQJU)|Faw|yICqExZ*93j#8 zbgt>KNoFlF>KzychD*;lL=B#ixbT}^)4BWvsRs5PCku6uNwS=*kVEn_jcYGCt8XkhQ%#i3$?2}Z|yt!F}i^><*fb$ ztT)v-T&aCokIcR#r7EEj|LTvw=0a-5A!d_fFgULvbTy4;g29N~`9R0u;~HQn=L20N z5Log*{J=#ZX6}fAI3p1_^D(4Il;bwf5`$ckAG-DrU;=G1cLklmKW3ugq$y=!a=!`0 zqZr5^5_m?vyoxZr3&SOb-G|woBpR8NS*y`3N z+ce?gLHtUDOUny_iUp0bxP4NZFidGZ8sC*0`O&M8TIKNb-(ja(X4m1EnuVz7Ex}7F z*gB1JsQ#_LNt_g=ZW}_{+aAJDilwvbOaFL{TTRQ-6mcC4!C@p3Zw_s5j#A>FJnGQ9 zp#yBq)6Ay9*X_0hB^~Uh+S_+%9_nVTYc^`0Vuz1FzIpDL9Vo2HNV`3uN!CzRanw}NR+Bn0L36HX zoy{QR-+J0=;$;^czmRmGPKX1E`otkZ?>%={r5Q4zt!a6B?U0ZK4%%ckoKVZ1yqo_c z+`Vb7xjXEbF7BoFQNOvBy34jjCm3QOyy)MsH0)GRiAK+S%k%l)yix<<35Wnd007i~ z5hUC{{rHc#*x1F!-o){ra$GC-*ak5m?07)gk)TchD^Do4;BvEUS57$Yi_0;xHOeeL 
zom=&TVauQX84T+9<>tE~E=YEerCd*XeG0H}x30ue&P;gfpAc|b5KBy!R5b=i#R87h zSCKOLo5O4+|Ci%uh8M~b6l+*0u|?_!cOxmAxn`VW?xf;2En#Sm@bat9NXPE1%H}pH zcuh$_fz2F?9nK5Aq5#{PB;D-H{ZxA>2eX+F)D)(h z1_cy%tkR5ndYx`;BF#mssnMhH{-y(p0ROdlSqKOM8`8-9$e`Rb%>C_TLp*n?u z;5#vNBs8U{+8}x`M`{i<2!B|1CjC)01-q z__`-ltY3f$RO?T8AoaKEhYsgA8MoIp%2%8(^|R{5Hqx_gpsU}1b-ebH#CH<^O%^P~ z|1(+Eb_T}(Iaw+Ll0yOrJ*U)o?&j2l_yq3q14)4tyy4>MWXPTE>$Ke1QolPgS?v~9 zEoTxA^y4#L+>a-W>m(ksnx~vI_bi|P1R44*IW#1+kv1g)*T`&3g842noc zwmUr|t7>&SP4z~ovig0}yc|>u2t{{-8|o;SKQhs&v~K`?W4nJy(WkBE_P+{!_4*E(fM?O`)4tQSc5$A8Fgn=80`d^NK*j`dWuu>HS$7O3FV9e!S1z%GDHr^ zF-wv1p{j)>S5KkX?^H!HHbv9Y8F4t_W+;{H#aH!kfq|x3hP2Muq&r&=hJ+9A4_?4* zBzqFmE#Hy7O{=QMt>=vO`*ogeJ?H7+dgRfr{!toiSR z!TI>(ipzYfK>xxcKwqK_!7R1zI1jw0s-5B9NbHFGeIl+=IdyE3ixR#oPw28ilL1kv zkCMaqpLS)&Gvd`?o&>n>4u7c7@DDe_4FJRsC%_(ZAKmgEyoG#;jCkpZ%0e4s!cX;6`C(=HY|rv;CRp;)Y)oVHAOvaKx!z1}PgrZ+GN zy1E9Wn1xW2TWL6u7&7GGbH}1w+TtSarL?Dc+57QPI@tAs!K$Cc*d?sY>ieGGk_}8k zt^CqhI7=CX(}0h^b+b$H7VtHGOcqgLw9*^{$tHhq4R#HRD4R8<{rRREU9;r`$o40u zd_m*7O4fAqM!(8BBpe)K9)S<2AU(%@KJh$JYYVYv@C+$i_Z^UuUo?HgNpKB=mZ>)y z=I5d{b(O4ymjmogoKYl z`V2-^hKV$|siiyBI$3?925V-pO8J1Oj2w&Ftwd&(Sdu;ZSZ*#4p?GpG!dKUtx$;A5 zZXbEs)Jwz6ism~RC|Y`2z%^2CR7r;xpd5BmGKGDjK`xZ}@Ez#ifgTJooyKvF8Dek7*p8y41QxX3R42+ng(>@NAL=x_nqmh|Y%AsQwz*~>a`Vbw zA8@foc)5}g7OwZG3v7z8xyK*i|B7JXpArHE|Ljbf(f@;B_6Ckl|AAn~I95)J?Jr(_ zA+5M%605M2zE{?5Nwfizb7;0&?L!PX`bZ_uHAZGO;D2epZ@r!xNiPl7A?JGjrC5hk zV`n}|Ik~;gF3^fRu*S|$DLMpiMa|02$e_`q7=#sKW#vBQz$m0mk0zO>9C?KCS1JBr z=GHdyd<4(KoGN9baC=>Oux+ge&3Yv4+Xob1-rYgy=;}m^nSrON6-7~_(55=D#6*tM z&fVIsN~18sxLQ5WXcp8gY6k+pC;k_b_|ZAAg?jZc*n@ z97YAP*shv)V&UR{U#*HVN+0A)$QmP!3vM2UabSavz6J%j1(lllOhQ(X(=+TfQGxbn4S1 z-0SIJpW-|PuGS2AV9QW@r$~sg7>bCT*gd}A2Vd{O2L`BaeZP)hwd;v-xS`8_++04) zFM*6}Z9)kh1&w;~&sR^&ewJ4)hsFNs095OlfceqRdj{#|r2q4(8w9at8_mJZ2vg7nxJ zj1L^|;DkYNE&ETuRlJFAZhd9~Q;xCr8^xMJn+km?;zrN{?`B93a^yiH zHeI~Tu)sayNLamERl&fyh5zafGPtmG^@Y(9BbD=;DF_m!D8t~^8=9jDp^a4%Dzb+R zhlR~g+c3&C%^I)?i=^*@o-v=?nNmtF0By%RS|XBJvB;o9OHR?w+}Vf0d2#u&c{+Kr 
z0%B?d;s0(R0#>{#-Nt`I7JEIJ)qw&fmQ^ytTm*X?Y$A6Gm1n4(#geE0P#~k?7^~ppx5XA59 zQ^gkm^)&00Bk%Y52x0OV6e9V8Y$`0YLOY&h8FOiTnF9?yi-e-^#;mTVXgoWN%%7)M zWQk!!=`&st3_T?PKyc-|8`jhpTbD0n>KWMsp@gU80^G>kDMbvRCUzAHRTz$ZMk;ca z3gPbD0R9E$g!ngI+}=sJ0G8 zJ=-t(Mq)qvat^>ZQ3MhXa{uHq48-S>FV|8tbIS{TsGYctWTfF7*`Mpg>q$X$fO3XR z-EF?I4vcgR%nmI-oLS`6!ASwAQFG zLS|EEph8)HLL72Ny1oc7=0juxf~!8B8LBu2=$&uEwT1!j+!|Yy z`7`t5EHVnXOI=zaP&=~XapxC=u{4@N^jQQc3Bg`>$&6MJP@yj14A|mGj z%OCXE0Nj+?0JD0Wo}x-1A81sbJFRm@{SZf}ik;g@`aGTSXkyB>l(x~WshC!jf1WEc zrS>XZ4eCHJ1Mc&4#%4n*Bvo-~>AGAQke`~0R@{3{5^aU_y%JlY$Ct5DPo{+B0;U4B zGK$8YTurithzLxVChD0aFVrFZ`Fp!;hVCXf^}MkZ1*%L^n`a@CN0~f3$crksKks~66_W% zGYZrE4sG9WkwzejLZWHmOJm)VATCyvAGd&fH#`-Bs$qnQrZBF7K+Ju+cke7|pKsqX z^j90OqG_NO+mRVwI?~JtB-X{n|6(*BR$2mB-kT?`m*p`vh8(w|GuJL>HD$D)%Fy8N zDS_Rgx?NBa66o#vuGJopQP_!6%@Nf#hK6_mn*)bTnpKBEJ|{Ie!^mPh+&->u+F<;p zdd>A8FeK#H#OzVbbRQe4v~yrH{_)FZ1pe7U({i;}WIV$ z4GDrz9OTjqi~LmTD)N9UrQ{?5>vT`7zdlIfC-Y=w0VI*%av&8^ZCt3u)vG7Q^0b0U zOi`LQ`DTR9C^-UB%rzYf6Pw7%VEOZ3nui$au2Iu#Y2o0}%hhrwVEvlvH!Q9wVg@@s zzI&KdAw+3@OM2%P;azA?QwlD(cw~!OnIjav*@}P$GxnRB83BGL_${2GH}fGjHW>71 zmS-GU63M=Gg);k6!eE8-E~q7M9kk zS4s7Pw8!9Wx6If{7+L1jG9?SO0#Q!et6k!V!wt6E$k1tFIJUS%->bo^Mm3tOo`Q*u z5-`k2%45xxDh48HyD|C2FBwVCIIQEuBwmM> zP^+1NaJQ;P(UD0pavn=<)P4%eVj>)9tTVw4d?Q9kaxu!M4Bo()JQrT=(zh0+$Se=NE=z@ZIpCk(txl1(+KNcgrlt5mg`ji5{v!L zKIqX0?`0Oqsim#~Tb%0iXK%b^-IV(BH%1Vk(bo;5cc>lvxjGltg`@Zw6NJP_1wS1p z-ez*)KGb#Ewg&VAm|3?7tb2H{c{efAk=oSOmq?V^}B^rKotQB0+4 zsNLqo4$lj<__th$uk1Q->}R*GAA-TIX@oMpw{=5fs$VK=V3mh?tGQ*2bL$`61Y2=; z%`*JV#`U}YlV2?k>+W~UA6}eQ>qEPPcCxd+C?f*Xq}|biJ&w7@QLW$i`vCgt?MIV9`v_RHmVHfL8Q5PdOWV#G%A z1muSLBlt!}9TukL@j%06n6lxSJH=$#ZV1w@9Ir#U;Ha&;w#C{b zK-hVWdfSWDE(7r_U|(zBsa@FnS%3TGYPoiOoK{?=8{EE1hEAM|R%AVtS`G5n+s%&K zF2z)K$j>TnJLNwPVXFm4=8_39?w-aGE1Rl*pUC1Dr8+8`MICJ%lNPvR7Au*d;=`h~ zCRbH*-X%8p=Pk8+r6g4y{9A&)8{X9KYvt z#6Te%y_JM?<7{*q?0we+N9nXrIl=%=T+>Dcx-k%r<3{$7kzp(#nFk6gE zsG;<44K(uKZlV91J9RYq-|4s(^=sSBcBJpyvh+)$c$mphWalm$d35u642n5d^eD8b 
z!kH9{jM$b0(x5^tvpiojaZMspDjUyND0@RIK6g_N9-IeA?~M}Dt0a3t3&Y;X;jTme;-wsJj4WpqSLNH`dN z4?ysNlI}4=Hg7KXzzILcwL6>4qH7&MiP4F~GyyVAxVwOe&3YQDggXePrRX`Hs+V_& zky~T=K|K}9fPg&-Lbm<~*y6@uqS>1QDny5fAwzSC%w2(8GMshDby*&@YmKiE)m6_!h|H=9^d;tiDH#l;J}Z$rBsx}!<-o9 z3!sx~>RK^$;K0$LP`C#i(@L0Q2IEaOAe@^<=4B+Jup8xc7>=_OL}K*%c#S!Ul%hhv z+Je`>CC_eZCAlpgyscI8YM0%k`&8!A6f1gH01k!H-e^b8QI`l3B8@!z1yRg_&%9`L zC9swcDlKq9O-yqcp}~lYd+c8&)_ddk)~*iS!A*`C@PMhYVT(b5EMdn8DWNir`_{}P z6}u-eLga{u1Z~asaYWqc39E8u6GU!cqAjSik~gygDNQ8%;NvU9GtXCwS8`cvzxeZ7 zN>B8EE~*pi)qh8jEy%wAxRvQ8vH2hwvEf`8$hscL5tZ2%om&_q zvYSs-^%FqBk7QYGX0>WSbo{{Ki>Nedt8LXP!3VMNy}Tz`)6v#)35Z{ti;}Z$FjOmd zYnutTjJoJ%^-B5f9f_wRr5dNzZEoW6K1chmGEh@V@$(jFAVN6wSW>BeEMy&3{zc<{ zzkg5P6e)M_#Ls0W-mp@uAPah90o@lKK3~zwQwq8Zm9osg2nX_v^BK4!rL9sEmMeAU zrNe;x%JI);^5Dq!%&6#mC7^(a@Qq z%dP6859}2M^g4>4BLt-v$!Cw@%f7>3ma7Jb4soxO&CTIkF-7OmT0nZG8;2)%>cUnWkIVEY#?I<%Xhg!|eEbCN zIqVAw2#j9Riz??~S(!+sv*40n?Uo=#YK>SZPME0B6^wBQq2Dj`qZ}!>umR6a5mX+8$V^8@-MF{8BLDvvctZRhX5r@cp_r`ufDb<@5FYKKV}J zDloLTg;$R-xO?uKJiG$+)jd4DesE{ZqKWI%rR?SI9mQYX)I*OlabeAxj~g+(cYVj~ z%bQ(_ntif~nss5QOcN{{P{a=Fgtc6#H)}L-0N-yuk|ybr*TdH!t&rZp7IbP>J2;P_ zs1!cCnO`~Y)wn&Z?_rn0p~JNDtIXB>0g;lr(Y8^KUf9x^16=3A_VOTM_@;VRI}RYH zoZKj5GK6-b!PYc3B3s`{;cebJM!~0j$tLo|4z*hCck3)?w<8}B*Y5qsncv7>Yf|sN zT7g@sE>+gkas@6eTV&3~qv|+wjvrOJ>%MLi&V$eW0={@a#{A=ATol`T&d-0z`fWJ9N1`8y~Q0ECGA`u+f$I zfo{lL?L=~e-|2KwWEL~tWAm7yxqZS#`kghKH>1nbWIjF9*zsEmofGdY7Dv`cCr=*3 zdul$JpX0uJJiW#~A9{CIVg_>Qd5ZsE%`YNlH$cEY)B95f0094g*HdgPoSpx3IgHoB zZBsOH_nCTQg^GYyQrOasEq0|*tf8X2+svyw^^vKo5Dz#ZKAch99Ad7OZ2ae?<^tMB z892X|1M@Hl&e_eyWn0Hb-zW+8)Ix(yD%HDFn#^P%5q=!V_tJ4Qw|d6Fcz>H8bl_3P&P?rwSJzH6xMc8w*k`UK!0!IRhf8>q5swR<3{ zkMDPX`RCf3s_3G`pE~Q|qYiGj`~BzHt-%S? 
zx4ekzyAu(G(-zLt8j1cv1nP>%JtwHY!I=>>A??>&y6jl`OD6m;)JDL|c<0}RdrSbI zSv}|D`nz@G+kPquVCAU?6*iNmH{rpUWgbei_hx;5AMH|;2jnPpCD#me5zkbehP;_3 z6~<4CRYYOHbAdn>?tgo=Nt@9b=v-6lzktu&xI!2kny4r5I2n+a@B9fr(4;LK76V7- zOtgU(?u|naz8GoE4oDd|`O!;2shu!(4Ko8gB%zcT+A(!yCYG5fy9Mt{^_yfPfN*HU zlC-`XFgU&<(@G*P9+w>>M6)(&WhF@(-r!+wsT{;;ud;S4%nufUFgX))AQU1dS@h~* zu#7u}W#T4H9tfw^LaHxgHF`-u&`Cjn#0WeJ*}>7;kbz==SK7i8=_ihoR^ORsC-46x z3nmqDP~&73XRG+oo)=&V1V96F-}7}mTx6ur9GLI`YDh&f<)DZQl8~!I>r7-)4eRBYN0Zl|YFEUOcVPphO z?B59lH97+cFP8wo!RTjj|IqYFqaA{DW-3;5O{6`x)65%6Co#z~t93UUgsgnnO$duK zSNa5h?bdpNxf)le7|NlSo+j#Pzk*#we7r(pKT$(M_9OZs@Hz~^qvE|z=MHOKiGMvW z1-+ZoFO>!U^+V=Wy#b~YDH1VuwMGfdrc2kFn}{thwDKPpaLY<7ydeTKoOv{1LaX7@5fVyH zVlL1-Nz3tFt(#Q1K>+P7+cMq_(wRElqdX*{B!~_aAOwqYie(y{Ns2>2^QKy!x~2(u zji|rXmvyBMbPuvm2gxh|r?FfM(k4SNr?Cc>jD9cnvs_1p2r+2xmlHh1^CF}@W(>s^ z4r3J+Lp>(#4T{v25Dnx@rE|5J^NPvWgY4^OJMv>EM2B4eYY(2O02W73J)@=?rlMcC zlg|KQS_~b99pp2XnNW23CVehgoMeK47ih*JdJWSxX!_|7TU`i1=%O|>vUDll1MSwd z*qCL6w`l23Xb)~}wwEZD%wyowmoUO`YCb9 z7(PO(5*J`)hAt3Vb5Ou8@7)@`@4_1j2xuVWz5h^&2EIOek$GXN#GQ{7Q4oG;V5IK7 zK4sBgfRjJ;6eive#@vtHu8EH}FRBJ569d+TuV2qGKb}_VESL;28Yvd=B27wABy{q` zQ-*wfb0c&`Gpex*M625-8bcE3JJzUAbJ}T|&Yfy9EMPUSKv2ez27LNRc%Ghvpm{WW z`-Pig78G%Ita8neaPWf>!r?ggj`XMVuz4-yoi8}|Cx7mJ5(d%T4{|gtF0OyKl@8+b zPcs)Ownq^^qF*);&U~yjS)_O>A%nseV&9jb${HA9_(ANOh%m!j3@w!|_TT3zU!2zI zytGZKln}|pKNru9~9!$r>o%y$X1j&-LgjopC!y-)brfboO0(+Ee}X zWx%h}CFLE!X=ypzp#;1qRcamJQ6*3E5#t(73qj0qbK=le zFBS*IPw$x{pHM6olTjSj{K+5QoOn;1sH*xk0n_=hVir7|KCehg8g7f0pa`qeT}S<9 z>D6p$-B*bCx-fwb;hb7%hp3D!){ai$+Q$iltS4RwvN{4N)|(+`$V;*qmwi(C^S7@O z7g2HFDJ8iL*iu)8L>v=4VIwRK%;?RA zRK_+m0juszl>P2vG06!%QhgWBzoJN2N%ZO`f?TDV_1gIxJ7FfQvGiKB=@k1K4477V zQO?p>7r>EdC@#05UJ`a$o}Bu+6Rl@Z zf)M(Vk$?%FMEon#s(977sZ+cTc9EjcUBWPPt4zQa5u7F1G8LoTvNzSMDSJXPqQt@} zB8gt7mjBl>yvI9MK1y1ZhqQ>COXj(2-#!YHj247f6GHRxV5Y>9w)vNWh+?{Wqi$W_ zBn8^m#MtjAI=VWOOut&s$S}t8dRcDNtgpc5``OK0Z(8N95!!p;NBqpV+}aD>luP++43=3lm$VhAy2mSMPN#x;5vWauinXPpA;_N)Y2wOXKQ 
zwS{FtUE6@0xe+#PU~cgg*GIg0jG#PEw7u-vlK+K3zN#&Rk(;T*HAQ$T7xT>FAe^e{ z!o|x~dPuul@f+fvha+s&q z$gZvY`86}F4ZguL+~Gg~mBXvL^^Bz2PlDxxxA=pV>0Ah) znZ!!0-iS9y=)jw}T(L?!%qu(*XyOw(N>q@NQ+V6Ok@<_+&n@ZV4pm*maKT$Ovq5yH z$hr6wt(J#y-fb*t0lc0yhF^J4KNuaN$-bGwt)*^DBIAqxoUk#?gl5_@+ycT-v(ok} zW_^DbCnFAwMPLO}tG*%YuCkzx54g|A(lhJufW1W$vgNil#Aw9Ze-n1#CSDuEOWA$&djmNK(cr?6qhIl zDzJE(<0)@11woNCT4zEeVLZXC=%^i`l#-9`S!=m@-UY9p4JPhhy6tZO6@@+WWPu5+ z4tYVFP^(T~#UQbrAyQ>u$9=82iz~!yy^A(3D!CXvBxdNACT2TLkGHfTyk3dFUy0w zEbjH{&w#vqMXk8;pyLIj*IQN4PcH~I2qz=$z$BwkF3GL0(h_`k2{pE+N&P_yQar|1Fm)oCd;7caE4q|b+rwZnq3x`Ep z%eay>eq;eFef3522p|VN7h;^9vJEt|_&cu@;#4&Sst-z8>{oNB=EceYFJ3x)I~flb zi!wbjZag(yCAAjcSv$Kp^b!Z}aG8g9jm|x{HLNvL37+GwjHB%)II2eFZF0-B)O-+) z_6E9At;e>2(yyAu84Ud0XJKtKE%ip4;e}|0w{Lnwz!mzTELI#5a&-$GS_!Acp8VJF zTkh)UK!`dr$ekuHSCegd?fp8-=-JxQpW}Y8r?duWZEu(Yxc16@K=fB&DCEF_0e?(4 z5UZ2B2jujHM;PtA7|HHL*QfLQJh7^tpBTi)HMm*&breHF{%CS za|r!FgTau4a(}YPcP7Q<)|Hc>h!&8G_fq88s~t&8&iW|;Q-r8K`SWcZK~A8)nE@E= z2U||Y{AqF5OmB~#PWAdcpZ3zq(&@H8Cgp~mlcGRrCzO}jG7XIGt$$B?4qW1k&lciW z#ZV6SaS`F`7UyY^V`D4X*BN)}typq~q48`4y&+?9YcN73Cg#ULKhupha*UsH3yim- z=~4H6>0x$IU`lDPau_+xao#M$?O%CI)(wZ^I>~v9Pf&>=FE8PTW2w6V%hfd%Et(M! zAI}zZQMKdve0Yn247Kx>WB}2Z-{x$)J@23A6Z)uyc)O=9*KWCMVq;@9 z`p%ZA{Gkvd^%#esVr{4O-l}IRPQ;4cH**7Hk*42*0c`Q%gPn@ylMvU%QPhiyb-dq? 
zvbr?CzyeY2*`W%^^4~~?lqkm>Y$L9hZyy!&=;52!v%)9l z#WCGutie!xOR-)KSt28_f0y&x&8sg*>#HQlW0ru}V_Szl(bD-#* zBN$xPAW%mHbzeap6}FUC6ApFbRvk`t!(Jt{EssozY4temqlgS+MH%Xf7S&Y>k{am$ zQr%>f#g}`?ro3zfGLdAnHpSi@b7&KU57z{FHl67^d^;3=+-=M$H))yd=+D6*h-<0O zmWm=7*{C}^mgs+QiDfn}(y6ZB_s$ty6?Ie;9uVLAP81|@X`HB?%E-_tH101-XqNtH zg&2iaKPNDS*&4ZJ+#b`{bDn{h2`EVgiC++}N%mPXYNn|qt>=F8t6AjgnCGGw1^w{C zKyCJZVK}N`RSA0d$pfI)@LDjJ{1LwL2<$tD7(C~%d<_d_m4-O96RUVf?VWwjT7M+% zEeM+t#PAPAOTl8)Jut$Fds`?V4#FJlPjLi~VGQQUdM?Gvc($u-S(e{*gG79>=?^b` z4jAW#rNP*}K^iTM%|a^Tgu$>jBo0U6#ktHgAubMa_#JH+*oigR?>{zy&+C})L~p(( z%ZYAYS?%aa`1Tb`ET^qt7Gg*OhH|OsgF}Q(eA+@tjFzGor3-RQ~*bT%BWg zU{SZMW81c!bZpzUZQHihv2EM7ZQDlY=3I^UoG<$qtg&|0tXa<@n`P$ISqo0EO8rN^ z{*|Yy8p5&ex(98m%B=ovq{hTYcG|TUCb;b9O6{>4&OH-S#*Y0IUOO5>5{`rRufU}L zW)z7iAZ*s=LY&IM8K1-0hUK9a?4DjLL8d%K$j4a3=Ty$eVP+>-$AQe-&D}&aV!INl z>*EC%l;ll7Kr0b%+z$H&!#<)C=E)5@0{&{r$&ClDw%;U(U$DGB^#iFg0Q)v;>x*Xo z;G1K2rc$92P0y@J(nI)gcSg(`@*!t==%+2b8CTUBr_5fSsj+$ZwJvfR%yioeJ8T+T zPbO=8g-4uCYaJ$tUHo5u^8CXaRse7+!w%DGwRgaI0RF7urkt zOf)uuXo8!U68z5~w9-eB5ZSCSR~w`CIAykzSTlVegU2H@Y+LFpLo z7kbxlZipjsR0^D(7XVy9B9(0j^tSd%PBg?G4IKP?3F#udy}4EFB7Bf>6J8fFk}2}6 zfDy4sCUWEE+VY;o4)y_`3?AqJ87cZhLe)ZZ)on@rY@8xVp{8V<`-JvXOasWGf~RuL zB8y=orgR2;-bs|q6l>Ga{>Ob*qk1~Bwuo_7Cscq-M!_zRk(8Jb2HhTD6cs*FXUanAEzZLxg z1-=b6zt_SXg}d^fW|~OD(!F{rb|&2%94?eAaYq|$O9e&G49CpAr;*jm;@H9CIobTHjkX1BY_+T3U{80 zli(!5k}G({?On@uM}#HAf#X#_tK@C-l~ndk^U0MR-e_}r(A&7E=VWA>OHm~v@`!xJ z00V;`45%e$9Ed+bL`)Va5rE;lXou(;E!)d}h|-U8gr`>iIc#)283G2Iv0+Qq>ZH$5nNry#k* zo=Adj4iX!$OSwWD-E~-bhh@}VVBFV`9&05xeRXOb-+^ATd4*2e21*A@wSO>1 z=bbaY2H2T_j&ik#o_{yJHQv4|J>N`gJ0&aHw`wv24npi#ie0LGw)=?Ih8}X4hFxXXhRL&iw?NT+pA(7pk zM!`P=?haR_zXyFR{LnU0aDw%#q9zM)d5dgxYI;jK34UNn%@X8VmY)JxGd!7hyE zjg#-L>atXJkrgh+kG~h*7PmKU|6yy5QrSN7&Qj^+pbwi(v)`@s`i^1IYgyRR0$Jd! 
zQWtVrk-)5?>UhYTp& zpVT0vYhY~n@??LSni?@TG>WFkK=RkvYOu5DshbTj=nE4j1F z3<4Sw0RzIk-5n&budRB|yaP(TL5-?9ZwNI}=dXkQWlI5Zd%^RL(hEa#bt$zg1 zt{I_}vzbf{y>Qbi)DG3ny|`tg;hI@4=Q>e@Bsg|fwP3WFDRi@U7{zqHcJTuThf|;U zGvSE+F&DJ~+=ha-mnt-?)LnrK&*`Fa=iC}xDmmHjJje=GF=@ZHnrRYZJMLR6#VJzi zJg4xN5;>OV?yhlSd7i9p2D4Gg2gkBsN2(sKm;0;v>!+nrb4ey#06&XF#7M|I`N0IH#g*#9SmlKZtoc`d zx7qM9rbP*>vMqB^S?UYloToWnfBp_L(nhkAVLP_I$)~t11Lz44`R+RE-Vc9Ur`}hP zP{+5DjJj#Z;~vMw|MO`7l+kb@lj{>+T=%xiX`li<-KU{6UqqmvbVRr{k@GG^E=l`OxG8s`lO!Za(JrXIMf1Lw;KOcWNv^{wo>v zpLk^vEo#hD#0luPa-Y{_pFmR;J^RU)RQyFA^=|J_L^7K58q-8ka#}Qr1q#EWlk=lG_F{JPJ>_I2k2!E9E zWf^C7Q`zgsu_%@zljSkWNGWY_#J_%)pyL&(2FgQ$PKMWH1LllRjND9NAM3h$xV3nN z2XwuS3ZbxBE7!}n`2W^yYVGuDwe8fs)L1%z1D(+tuQVOUBxB7@9B;^<cqc6lcqj24my?}~TVxhe7vMJ^xcuPm^lO2tCz)enZG}P zr9vHR6<1mt0e1coXPsp91)FIyv2?wVxh2EAO*^XadU@l!MaGuANt0r3oQ=ACs{T6d za)s~rh*ohO0`J!#$9wCPmcj!#YXFWFi z{Z^3LTLGKWtkjKjGBeK0EnB;j^D#TIY`vGXeDU_}H|)uu0k#{=G>f-CY^QbSUnGet zKVpu@t@1~z^VT^OpxS6AO9EplUm1fF%fg%!^PtbKBpUB@URi4-q0`EFZlHl7mek-O ztDK>%1HSLpACBUTan}W{A;4#>_A1ik6qSQ_CiYD&t^a@gvPdbg+=70q#;6ScajpCZ zI^kM#+wK6Pz3J+wYs*52Zo{k3W_4tYIt1-Cql@8gzJ<~8dsF)Ir#5&8<&qn z@*zyI)S~{JFrmDY_;z#`H!hh>N~d~V+g!(jY&BC->&2MKp}M65t=J-?`znUQrnb2X z%D@v%c^Epsvq7YLr%8)k!YW_iy_b9(q|`?}8=Z-V ziMnozs@g@g#Nfn5Dym_j$V@fG$(7(8HiBV;u`B6XW7_O`1Dz9eAA?pv8>my7=A9LT z5(L6Q>N1!^>tj+}7lM@QWf%|3fM%D*mvYL%);?t#4Raye+YnEm6tvc5;`jILIpJ`? 
z->ftxBfzNVXdQ$}w3}S~t@U-xZ$Hq$=*HdL?lyhdocjiSY#me=x-mGA(5xJ+h+T4$5T|zl%8|o{ zs+0-MsWhC`L|ma9bUznydl;T&GwiOPbA5Oj11 z5LnDwf+oD42YLY~bwBWI_peu8Og}t7xVgN5U?3l}WjCBYV?UA=o!)+Lb$s=^<5lo) zbSn$8Iw4$-x^sA@BRz#WX9M4v!=S*dAS#qjHC^d6(DE;Qwk8H@H@sAR02P~6V7O&6 zU0I36W9FqRI}9QFe(*+)6s4r>PoVN3b=i}%Y5C5tT2&f%cowIp`TUrzP9S6M2?d~1 zN3d$m{-_wRL)}`2EgFnIpFk~xk7~v*@3X!x*XoalFtj8GuP@N>c3BxBBpS==~m*aixLDAKKI$hT~gCrA}?lg-zxkqu{MpTs!y;rYBn-)jACN9}Kop|XTK zX+d652=LXgS3;nm+)#H<_;`pcf`56ScCD)!at)G;KN9%%P0={^f4ns3nBpr+w59x9 z)mQ&9BAoDYR?2Xz_d^Ha1H&5S{-Q;mjPw=dbKGoBDk5IJrl6MIcY^zJxZ8VZr*h`7 zAWA=@$>?**T3XLWJNL%u@UtGn@hFEo4%?z3I=WQFldGX1>6&O0`fdQTAANT`Q0~f_ zrfY^*XztQK$GS3BjDO6Q>M>bJ)gP#341lXH*@~%tnyt4>?7Og22JVZ%mE5P0f-Jbn zdMPLTj+kS?%68$W_9XUvgST-1+-c!_C0Jy>q<}lU3h3R&*Xe%Xp6%ri_y{aIF%$%y z!hRxR9SlW?-w~)^<&Db{xGNYtUMB^(h5>m(2@U0I3uL6X2c^wep6OY@49SDHn2Gee zj|-x|naVkgVGIB{r^K?ujJ3HKOxyGX%77C&lHNxT#7&Zx?GVDl;Lab7{OaevpmqU5 zGpCzI=p2%Kc|nIU8c|)UsphY?37l2@O_}jFdaNPLE9lf>{Q= za6CQts2kN9o+-i*#}TgLNTJp&UT;wooYQpM>fb`~2KZDV9L(a)!ZCyEY?$?0tWs}S z(w-|QGlXQRci$0DIrv6@<(!*iAg7b<=&rMIDH!i!g3mnDF?1=i0L(I(kPd@44iqb} z&dCi~+3CE9sb#6BX9G%BM=ZGd8=>jvaCh<`u6B+M%_1vpdV)9u$h7tA(24#v?9F9(bQARevvp=QdFT{xSfRcOSl=dAQ}@4E@F8GefR%wvr;q04c11o1&*?qfA@V-a z$Jo4N*7qkik5}2B>`QPskp@U?1~2w&P796_W}=|Rr&gF<2w``Ww&R?dKKH6vmo-5O ztF+;F(0sGgb?4mmi+nkQm5a~pk4XYj!}ebb(FO6LMhKmc!M_@X3!KoFseQ*?&fVgc6Zb#d1!j7S z`T&P2DnsE>i|1q%cd5Fq%+8Q`hoGSJ$y+TyvO$(G13r1dXPOedx9!Ji<2Q?|wnZ`=6FzKa@+E~*3*gfq(EskS z<94vK;D0@K%r9Z%{}(cT!Kb#)|K+iN*Q{-`7!Z0MQBn&~3G#jXCB4Eoa6HB7ytGA9 z(L-a9jgU93lUi>vGJ6F(HBy3s@eb{Ld+!mp<2MWIGmcY_0x^M2u|=wdZ)ON=3SwvD zH=neG?#n}p zC-wLpA;Yy7v^)DV`dCuYBs`u?S?8jrjT7#qZ;Or^3n2}e9f7>SBcD$#Qmhari~^SS zvuYwSXIo2eY}itZSV*B3Q%I-+7Ox|yZXmwrLERa*-1a)1S31*{>Ox2Dg`_~h*ZZjn z>i;;9zRIqb&XYTHpcZivWv`P4K&?Ne^hA8bD6!sA|H9a2|N(%{s#N+M`u)An=#~9>(UVaht_p8ak8^^ z{omWf)fzTVnc_(QYV;U)x?%Mvle0I|v{bx#w{yCVsl>*~WS5WY;0!RVsUXz(tvX{j zcYC&B!!a@VHN;3KVVwECIeB=dhFY>2nrvK0Xs&Z*M|SUN;QS8DSxq)>JX!v}S#%kY 
z$t|**^piy_X5p%`6|Nm>I5{cm6nir{2$4^!$29WWejk24qv~KbLI%kJk+2k(^;-+Z z{WQ0w`?Ynn?EWzKX7o1Jp7I6NXx?m1=VZy4kDPBm9gmMsYNVECOx3*|4;sEN8v2L) zP&Ibt^!j+Uf3@ct9g=94qwTczFEH2#ZOKw+|Iz>4{lN|#8o%9yKQABkG2IjZ>1xTj zpR3IZz{km8Nt(vTWg{mzYE|VimYDe7Qz#-H@Qc@D6c|Z$7&0N96hlDw{^b#QZF@1NL70A47@Bg2iF0l; ztZsi&25&NClk6vyUJ63M8j(O3eW5S`+{&$&5zHdsMjBL&swcVbGpeQ z03tu}k2s31fR6^LVh;V9GAihAXaa6O7H|f)6xP_gM?ujiK!>2}=RPX?6D}vx=A9Y* z>xelM0?}?tDM2O=r?iT3h!Sp(L}G*vMryD}Ff7g&YXE}gZ6@+B-8(#u>e zipSuu9yQI@uK?R{fhg5a!mvx@ha_-3Fv;NA1BpaZ&h~i$hzT7W$KWAq{Xtzh46I9_g8Z1lVTBYPeb64tO4!!cF}>D{ ziMp2>(U+>yxwN^J^KxVJ+5A_=B_*h(Mz_?$XWBI7FF-KuSd{d1MB_;V)KrHgKcan- ze1?0DlmMlyCm~s?HwLyWaFL!QD8*Eh!I(8cokCd9L?7K;qy*h~h%i`#*a z2g2RP?s4Qz#s`jzy`@P>7^_PqVx#P@PMV{o)W;VRG|88)Q3~YQLpFWN9AT7^UL0Md zST@Cb4ZUeEm6V&h+tcf!jDzxyFSP|vO_2F2M>QzMfg4BM1|uj93YMLo0vX~ zKBL?DxLKXg0Ng1JnV8_TPtiJdHaJ(hMS zh-yTb^n0RiFwZDBecH-`A&*mWvml93FC8nHU9oAO8|H2BjUPi=jFc%dC|(iQ)D|~L zJsk8AF99xSg*WaGV%uDDbz=$QJg=|DcEw!v_~~cY3Etwmag-PH_H@{>$Xg;P0d-@Dmf$;>wNbWdQFomrM7>V zYiPPv5eH>!3tign!1XLMB*#tlwv}mzse8C`E;B@LUtZSwe9&kpAu`h9 zpy3xgqC%t!?wF8Phrpi22_@T^Qm4qe8RkD16d>4GBanoP8}43K5}}WLt-`imp6PH? 
z)qSV|UBn3DpGoc9%I$*kYky$UzYaA1u2oDZYi{raE8MCK!H(dW4?xCX$*mPc4?>o* zT_=(2aH}WIWM5>Ke8?My@=1l(_@%njn7q-h=(n(M9)~tF!QD`eDa7 zw*4$Mm&VOA^}f&_uwmiBau2%lMQ3}jxnI7SyTty;#rkRQ$8~cu+fH3T;ol8IMTGB- zv8!#WJ67?g#T2!&E2P|3bdJo+rU3e5Bd-OH(bL`AdF1PgKodn}m8o-18gl4i2??rc>djAgH87YBH@Ec0@#u;$cEa-8SoM&I$g`f z*Mk|149}*xz8pX0bR1md+BubUKB`p9i_8lw?p{!xO~SMpOaL12Rr`eHh$8*=ZtGB7 zWN5ymxpw2;jJq?7;G1nGf@j6y?2utf41d-}@r_MM<0&uLGXq*}XO_47R*1RuR#Cqq z3tM26q^F?O0hU!sh4Z?Aj}W9xpZ}qBeXdAWHSu6!o`HRXJ9biBw7ZTPCnJ~B(j z(_56N!4EU|@((j>X3#D?r0_1#osHV2&Hg$Oz^OORBy$K2^G@S%%7)1N31j~TQE%OH zX{pPV6a?Q*lK)9qW{#J%(8cq#08h`QXXcDrf@68)C^n}r`u{vRqyGXp{ujAXbzFAv7i)S-?Om8xhVQ=w4Jr#kn2Icz zDnX{dzm{?haD3?y{t8kLK^31N#PlcUI_7%|4Fh?EK@{3qhv4yw20lCxS->)$t5|_v z9Jz87*C@erSf%4;h+^GfwWh#0!E*SZgaV6Fvx$s0^YCAcc_)%$;W1_clyO9=80|7v z(Q}qsAT@2LB=ouW>sAJ2Uhq76cxosrBsC;dTc|^_2ErAiMj3_zlnCRC{5#jPyT-0LAm11LoasQZD7kqp=(9yS~G6Uf!i-vs0WCz@;= zT*%~>YOxA%Z!1kN42S?*?ux!5)s19s6Oj|zWp98h zmJXC$;jgQ)o}NLI5FKd?iQh)2jNV(qJ3A))$H5NnoaMq&qG&@7vDTX98Pl|?BY(GG zu2*C0{f5of@yEUG&ncyv9I7J@*;t1=Pm59v5cb-LL)sd@un?(I~eZbqTHkYg+muyRlg@;Ab z=ebi{9M7cFfO?LYhBJ#EaB1-TZr`bead$O)?faJ}>*k#&HZ6a*sB>p!dvp)&>}*pv z%Z?zah?S2`kE#pg%BV(vI6{IN;f*6{X(L1~E+?#yoY_WOm;2B8Fu3vXQCu`A^hv{k2)P zL3$*svVh>mgfQspx>n`Sy>Z<;1_;l1Dr{9IdrE?$8lnvkQN8rg63k2`ejoM+l!u3sRIua9yQ9jJgyN zR}FgJzp-|!-ra+64u7+kRbam|;(lpO^X`|jJk-P@stt$g{ZqVSM7pixmRV8UPox&h zZ7O{SsWslZWC9cAyw1~SUCbwYSywLQ(+D)*J|4WGiYI{>efI};x$I>{G(Q9Z+u*fl zvZx(bIGm##?1Q(o^h0@f{ta*25_h$Owjf2}qF}B{TxaqElSH9N1=i0`OX>MEDwjqF zmy{y`#gD&Su9p4yZG~b1pS-68mZ6fUol)NDGoS{+aSY`kkRd~|@|T+4()n%h(zL$C z3RNG{nUexcB#5!$SAV?~MYE}w0YI1zVl*c4OBSs(&zAl!dMAFg)I=EwUUIMCY8?KF zF;L%FcFBOX!}6FVSk(Qndr0T`=RXsyic7P@`!}v7Wp~&M#0(k@*%G(@@C+n#qwYh^ zwtt~F46G{sYAddjSq@c84oBIV8ZO}bfOQXIiJiizwN)tjt2fNL*zX`(<_J3Gt&eyT zyL4twuYydLfqw2cegvEC0c1!@b3U+O%8C2=AMnt1-%*;IUvDZm_8(ui1U1#&P^}lf&E{?=xLTNJD7MYWA=ZJeAc=h%Vg~STUj6 zZOz6kagy^4j^~D#^f)%khA`1Sa9ez~39UCMV2*I}5!+DZR%VfhTLa6jLe#>H3Dv;O_LxsA@%=|Xw`@%~b&Ca3qM--U{r%kA~vs1vu7 
zvHVm})j>7Z38dro4X?ug{dh?C@5|zIeN*SH`}^wrB~yTB=o+2N&+EP8-Ouhb>?D_s zYjd{ccvQhYRaAl%YT|-vxc2Vb1xF@Gf;V$mLj_DibWE-3?~ziGFyhn$5nPD_DyaoW zsAnr2Tmdr=I|OTxTRshLEYS0*t-_JW+rO3SL!w;JLyRBT#B~i_<^IRAHw3w#`tL8y zltdAddy~I*cH0?63L0QIJAGvwTWzR)PGH9u2l9L|zlhIaatt3Fa4|*(Mfru&Jt1$% zEY%@}5MaFB0>{NSc6N60h9XXOjARE-Zlo#q_-QjS2S%vKH@(NA@KR#_Ip!V=P>V!B zR&o!{TtH5N6hGI&X|gf$^vUi zYjD3Dato|1QT#&!BC@GJ=3QAs>&$`|%zMV~7P2z3Nd+A+Gfo)b;Zfrj-0%L0KpfBW zlc!64B(uCDA|gOX%%<^)SJt|qi)E4i6#bht0%Z6`euYg_LFSyaLFjh5`(oBm9RnW* zuVq=eguhD~a~Z?S7QD0?9uZ%S?Ee_VSyw}!!M%l=8{dH9TQsu=!V8Epl%rKLK195; zv|$#epYt1iH z;bsyl!;AGs6=@VuyH0hRN&!ec){tYSvj>)ro&bvK6BaQQFjqO!cZ_n}GU}>PB!6pf^~ zEe#TZsgun(2%v&B+xGj{u|;zqm~%`0PcTwR&&mi0;JCOXj5I6EMQdxfe}Iu$u^8P7 zq=l6ElZe8H%22&>j!go;nv0SB&~zRU4P|1_R6tKCcqJNO!bp!()E5)Ple}~0C;Hpl zxTE+v%}EB$0H~4(dQrFr6LvNjVc?I~AVM&{FhZ2WQlUxQteR=7>I3*xGc7%xvf$)! zLHW}-LMZLCM9B?9sFRXtIArxhNk{!nRoGon(fR$ScchVOTYka{2(ZI!{ruA#n ztsMW2vhxD-Mv*l+0%tJ=jL(>DU>GAjrJXR;VX-2krWqwl-(}Yxk?JAA{RHX+O8{ts zIqIcCSEm>Q)Nsa*U@$D<+T$iA#%3Tl#(64Qr4pp-D@s4NbE#|VJdM|Yot{FP0~KAw z6-Dw1F8dZKd*XDAung&OFlK5~!hBG`Fd*dv{5xKxEvM_vz%L#brlgnVB14s;p;8q1 z=jxb2&OjK7gUqE4vtFS}5$q0xLIipw%q77)z_qc$g3HY(6VJRhO%q!RF3vTt&!KO# zWP})1qFO-*f~yamt&7<+kAmL4+PkkbwA?iYvitBlhF^eWo7lAs=nYf=N0Bab0C+jv z+5AeZVPmfL*i5s_=mJ?DfQ3&SDjK+25Gw1hU?0qBM~6PZ40aF4Q(TRn`nAfPGiroA z0bPV;AU#7GRLP4zPlDy_yh6&~9-e(%{|UQd*<41x!282sbA7dATDKDbnZ9fm)NH^L z5_Lpev-5(}(qWq?@C>8kOwI|4-f%D`qr<$orm;#NV$C1mhT;S&q9i4sun%C5A}BG= z2QvByWYR-;qW=(J?*z#OjvK#I(-1^>!$=ePHmZKOUeY7Xtj44ezz2FQZjS0WN)^LU z=3VqWGBKHASXH+n@CTe3rhj{5XWvDdtL>|7%3_5}Z3}d6G{lFOD=Jc538rm;WF%M$ z@46;ormh_XHYP@(}L`-J9F6t2#{5%27ufWyDpg zhQjMz$-8kZ4Fw8<+GnNfXG82po2%;p#qK;Ze^TY|J<11B-|c0PU~qeUn1(%3p`?{8 zk1l1dB)DWGMOyg%HGY#so?nB>xxqVO2F%pMt*!%Vk`IE4oUt~DnScEufMbim#J`=q z={G$jd`KqpK0L`&yevm#WVE%XY)vP?5+lHpz-ii@=|uRhLr$OcQqA9@l&qO6VrI7X zj2H8>Q0o9$FShyn+!j(R>$h*egI7rd%f(D6A1&)ijcY)!fza;8L783@$oOjN8vl#= z6dIngiooqfgzDNhD$~Cv7lX+QAlhdg86`ruB*(b5m9x-g3Y806RWzq6B!b8V5-Oc) 
zpG^tHiVNV(e!O9hNV|@GMV;3QRS|N!yjHFNK@qkgl%`UklwpZv(HXoSsykh)@|fzp zN(IQxZ3gi@%Co$GuK2p`I)EG7Rm^$cR6}8l4gH^lILM4 z0v{U^vG_cRO|aSjraM+HIWg*$>A!j>3_qzjLvO$+48o1Ua=_&bzx6tE3DGlcEkdIo z)6h+x1z!h8QBWFfL$?#T1-;2~WILn!m)1VaX#qaYlhUb5n$p zGj)zLA`a);Fli;roIXQgy1>pA00~-&8svgSPF||Q&?mtjaYFC|fJ0y=3J2VSAC?*m z#dM%t%AWt1Uhr^{nUeY^8iOuKD(Nd1>{*PWl3xz*(E=4WBAapH=Erws60fjtk59vNQXO0>2uyr0q5rh36t;~(< zhj_4A@BxG`q>EhPg{JXa++|so>@0fJu7kOE%t>w6aVEFmC{%zxM6rgrC(>x?oGwYr z9zCVhGo|&b;<-{u)Qo{Yow#;~O@XF`P`Xyjf>(J{^JJ^gD0l6*B`$zEsw&jA{I#DSIf?M&LsM+!M^3>N+{Hg4}gvh-FEAA21KJ~W( z&XZ-U7|kI=W2JtEm{9vMkx3!c8#fGL*=3P4y0s&AcjnsQnmK<`9l({3#UqM-UX@3X z>ft%=kN{~aILL|=6-M+j$O#T>=)y#F{+dk)-~C#3oT#9D8HsoK*b(Nk4I0r-TBF%v z)CYaiab8girsXE|IiL*oMz{Q@tXg;exAiKx&N)gO3dwPa)WPJ0%}jXBGm@#7#LMWm zVJal1<;52DRpu0Hn$4jgt4eF<^>8E(^i8^E1>{hRD`VWJt&@Mx3o!VY_Lg~bXk#w? zm~jZiKP82ASJKX|p{{dc?CIOqlBQ39kSK^|jMk<$%+E6GVD!y`_IPY%V?}xs95Sdk zCa3u9jCru)u7A6&215}0H{%{Sl4mJDI5O)kw~NVwwkGyVT@(t0x{ zI@CxgQ1O74+8{^LOEH*RD6iB=+1MmIl!mG`2B~2L+Y~y@P>6+Gu`wU8vFj&^Jf)D& zka0lG$N&}mGr9-@Pm3(*$bFTms=d~x05GerY4!Z0whF6f3%U0dyQv$Q`TJ<8+s!K^ zdIumU5b$%J;UqIU!YjoGhGfAWXf`3GTy>rfX84ukHk+@ak>yt?2=fXRgAIh1A*;#W*~NvWd!cr4qbi?P2owsR_Hc$+0iLrsh=8C6@BZtwSDtHa z1?MPhFg9~_+;5BOu|s>^m#lCB_Mgc+`Q1wl$o#+<+~~84rY9YNGmG)E&8hWLaB=aF z+2VZXc{2tk6;sr;!?peNHh@UaJ?3atS^|X2?Cd-*@mu%#^8w@d*hK82_t@-w)K-ucdh+qll)rUZiMdIdgXmw&D%LBV^e7PEI ziElo>O+wd~?v)7Nv%X2GdP{7kW|p_i`IpI?y09<1^Ofs3Lr0+S8_kwdQY75FuY~6w z%9h9#RC5Quj5~X*OtRQk8$XxDhD>5UrKHJKMoWZisFbAKafWtollo}4 zjfD#Pw(Al*Z_kX>DYKA}R7mtf&Oc?Gk(^tR`23xakFgKo)LtML*EL3$MowpjYf5I$ zRfBw+HpS&!mkd2?k#TN*C7{MjfGFItFrt6u^bQQtnCeRkr2p7vGs8ShZM_t$pi)*!M!CUh^FnKBay-$kAA;JGXMg{jCkO&{7mfB0D`(}?6jhLr`6W{k-CX;sSpA<6A+qahvWleb^;f$cjmFcJ z;>60H`cHE>^=W7wmfL#*gfbftSP_%wWJ{KV&Ksi^3ymE#2K;Hc3Bgk6oQb2WSJR)g ztiMrGzzkCdUeLB@136?5*U<|06x}1^5Rjdq6x)qA(f}zY=w=Hi@9WS<)t!-N$_CnH zsjbH#zKoavjxn9tI9vfZ(a8rluQY0ITxu`hv>j|lOLccGaG-1N-khr+Bw-+=X;lzF z#wsImgLh(}>6C66Bg6n{3<0b#Of3eAbfEi7OUV=upRhKmUuLjsS&63 
zRQ{A_>Iie?Um5Eu^1VZ@dm+M#-~`vRpg>V1^P>L&kh|sQJ8WH?5{uF9A1H0V)#mrE z;qu@%!v?3Fs$vxsMg$k7Xi+RMmY@Sh6+C(3Fkhqb}kKuHnnU zsgh(c1616GqiN+)0RwHfqzdHjv8Ou1t8g*|VLTD!as(@44-{GQaeaRlS=nJosL0HO zxDZ`;jA18unPt; z0+3(?xkEd1LtGC~?c|gJ|DHd*3OTsr^n1)aE+q^mcLGxZ=!Je-_4_vKU73BN?|wV@ z`TAY|b9;LHoHNL{W^{M+$A@=cmu~_Rq;)z2xq*i19vobC$N9ys+Xh;NQPihh>U1-6 z4SWGt0rTDbGs-iWE3^&PGKhk$4?4x}^Nj6=1zcm{w0kJGie5G2D8@}?F{UdidiD^}P3U z2iNlr=I0&7XL}f@cRwoh1^6)+4mW_^d~merA!Oi7HgvG!+SrOHyK(7T2x3AdOe~3B=zxr?qQj6d z85ODoc@Xg&IPcl}a8q#pD2f@&-sb;?wDG*{n{ z`wXaanS~E$6Nk>6wAnl+m^7Ke0OMpu2I+%x;#$qUXcWwL>-NHvqBy#_i)VZG1L|S>qHXoLC3j*H z@$A%WTSNv<_$S}G*6*|Y?5O_op|ll9P!DNP5DGWl)oVrNh~X5{8A6$1t{2N+p`&Z zRijDPyhveUvY8+;r@D~JeL6l-vj zwJ98?26Os2q3wcM!6E1kNhID#nvND5oT@ruE?vWXI9paE==GYOguYwU2PosjNpd`>TU z5%S4ve~E9rlsk=qp*#6i5mgVel#?*0HpO+%Docn{s7?bj6m}k3-ksEF5?F5+FE!&| zH3eeFAYTj9tdMLzLOh3E6Ve|fLj%n>cq`KM%>3o5PY;=Wnn}Of01(c8+U3srjhbc1H!F&$he7IvJv1Uj_9KRybTBGd4(-c(5 zuuhp|Vx55i3>)(3FLgY)pHv3AO(G;D1A`jQpQQbm!tD{aN`Hr{s*MGUYF$U|ZRk8r zP$bma!deiXv_Le#v!$aRuy--9FV<=$6nohVD;64tt%W)gfhZt$1Rrb(^^+^dI3o#h zH|ooVggRT+rHzY9<}KH}doZ&?r3W3CayR6A(RyC`M61HDN{xh6NtoS|3#)i;o@gev z*4wLTSUpXigcAp4`8LX~xQ#rLkxdHKR6u`M*JQ(O1PC9`^POuD2XxE5TC= ztf22M_{)tx_{(iqMaZ4GEYfyv=3KXLn6*6iZf>4lL-Eq~Ojb@0&h}nku&LBG?ki3j z+V~-qwI>G0$p*h#0>1A!PVfudlHN9FV*tgk4xO>5y-xfF?xxipS%@p#9NA|0MCQq}Bw|%woiAH<0 z)IbwG{SqtVSrOHO6CCfasydhhNAGJO)qpmPSR#k47iPd%7T)R6v3y%7%d)DC!^I)>2 z|H>yCyTEloO8=R?WuO*CHl!6Z5?$JI-Cm(6nPcxMW7yDoQWvAWNno7S2t9wI*il68 zXif)hCSa!0#AYg-%;51cc#QHB6A?NfNA)Ii^VVd;MC;~OjVWe=;C0>eMpvQz+Jf9M zWvH>t5mCv~jTMh(C3JIUcXLh7ntE(^z{s)W+AFu3ICc>Ri9Xe7RRhhmq}!I9aKph8 zV~wSOu8;mC0pL|0J&;G1$P(NB$lNl6j%+ry%7yC&^;nyeVF&27sLWX{5#_}%Qo9N>~vX_n3UZO zLUQq7lhEvIo9Ga|jn+%u}`i&rD_tgb83L?jECD)9Ntn6Tq#S z-RSeA_@kn4XyMBRW07koJ_HHM3^V#Om1Io-04G+^Ya6rV@QzKHM;lpDVoUqKxcbVd zHlw9oiWG`F#l6KT#ofJFDeh9--GdYfF2%J#p}4z4aCdhr?ixaFdd_#(y5If#CMz?~ zGqQKy*?VTZhn2%j=9yz(1w2(lEMD2!Wb?_ruA=?W*mEH?rzfR6tOH`d9N-PEd;r)( zUN4}RgQAcp!9&7y>bC~#N$S4GRzAP{3BL$;G@9yD9$Sm_6fbAOX2f%SL=8P<`K!A3 
zeEi@CZfvyrifb9RK(=xQ(MPPo;lgc$e0H%rWdh40gT?- z5zihrxv~r)Vj#>(($xy4{n9u5lxHTp(JJ>cs0fknnA18a>5R3RT(9BAtUN8eU!khp zedoMfNn#e-XnSsQUH2sQXL`NfmG86uM`N-~8#Mda4%IGBIERE~nr-jFeat87z8rRf zg;108&uk>6`>2@n_L(-j=q;H<`O@b{sY7H@&Ox9NgxqE0Hyry16%^$Se3L8z7JE#) zcEn{p@D#OR*Cu`7$^p*4(4yl;;#<=4GJ~&%u_0ZvQ1nCrK=GzHxe{XxZFos6TUCS} zU9dcfthCRt7!LKV9i2A*CMeA_{xq?!hQcQp+*8@MZdcD`sb-=m{|-ZA^2mx7K}PN? zHNRMY-5;ukd}SJxRxyFx$aqrb^q7N~PgSm|58a+Vw8t*3xJ}iZ5(G3EK7?i$JL8@Q z<71}L%i4V-L4RwCF6xyWwGsZP*e}3&X8Nz_c|iR$OE&^)Gi$z`b}lamH27ZnJj~af zWv~CMmTrh?WsFkyHK>|jEE$3{#19&IoPQ-S^@#cXt z^C9jLyn6t_1Q>B$P~JQ^Ljnv!@a8TqYDBkO(_wgpJme9%t+ZCWW6swvYi20$CpZ%jY;G;_kkw zDZYj9V@a(pX_uCT*bUx!^S?vg%TXrT?vl_iTyG-N4joU=n@`;z_3M3N>Qo?; z57DfQk(4@ia2z9G97Y*i+?(5ot!pfaRsF_4 zaJkM;4-~%jsa{P+3k`k^4ktPN#tIr z5kuZSsf9Yr-paGTte(L!Y~DX;{|QxQK9q=-mG^gPb)Gz)nPky>Zjhs4iBeE)PCC1q zs9CWkzUx|4u}jZ(mo^s}FiBVM*D@Y@)HC*=r8pAW6EcDN(a^T+a{6>$$v%3$U5)vH zl;GJMf9(D6<|xz9nU?#L#XCE!)#Q)?3Wy+^%lKs!B$qxIEJa9%rmh)fK*u>tv{T{;YD%8G`ayY-A z={ludEJx`pG%+3^tHZ_ORDAl`O<=L)w(whOcn_145K%EW*5zk)S`&V=L6@jl>H-wB zI+<5dAhjm=fgRjCbjD#w6_F+b$PNy*e(T8Y?)Wf&dg@?Rx0iXUiNEM5saQ0;vw_h$ z8w1qB!c3q&+f}XNShL``OZSkPB#L3{DaNW;lB<;FywlJ0ICh@8+3}$T^^a#rmX^n*UnMD^ZzM6#Q0%m6&e{uF_|2MdgE^ zkIr7IvWj1&q&lT;Wp-WxZ2}R+x#9iX0ADFatE~>h7gJ`mZ@%DX#$(Gv=Zs2;b>c3D zKgH*_^hv`n`<{BF0EorS1|SA=pb7f03UT`6_wZWzzy*BzO}y`teL(T0flGc4d4v*W zQh~qPYID%T{Z3z?GCufsE>QyqO{&TCSEHv5bCi}0FXWsq-9}!s+3#Zp${qU?Ms6Ou zIMZlh!fi`k(yZKBhY#z+*HstnT{tC<39I|>yEgmSH&%KL>>tU|-X*9i(a%EtjhZ|4 zity09N>F!qtQ#n{vzh4m2S&Hw4{c32Mt&k#u`JTVo+$Yv)2Yn zVfZ|A3$F`v#$4$n7D^}3okp@wy!Xo&{fz-FWy);as8}hDwa9;}Iue+B8Lwo3AKy$> zdLPhybkZ7jU$?)!+*--Qa`4DI%DYDxUr>(#cG5jHX}HN65^D*)q1=2Wf(U2h z4eAAI7?9bIPokLSIz*b!bMxkH^-D2k>OBic5PGWywjWHKU;g%q?liY00vqq zClimKFR1E+j3np%8p<+X5GVt!b*I!^`VO8g(d& zEtZU)sNbh@+d6uW>!3m%nI01~%GSeDyT3eBk4N3MZdJL5XEe37{``!qOLjawNB^NY zVDLf8d0;D3{C+s;BY>%NUU0$giAe=5rO`gy=t6hHuW|d#KAAfj{V3ybA4Anv$FTNi z@=N7XoyPbZ@1mM_>p|Jy?DTH1x&>Cb+dJW6>bZ6G2jJzj7=?#DxLAr!Wy`6h!YH{;SzT+_QSUpbajZHYcSQ3RjO!8GKy1ji-F!ph0rX^fk9 
z8dt#+wxk}y{~ViE6vJ=|?B$wP!1pKrK>9u=_~G)|efhLoVs*8ip70H~3NJdc08(gR zXvBGjePT%_aSJu_*YJ`Vj1nrA@#XjwxSO{DO@;FXKl&%sJ@2Uq&3b=maMAXLJ^sP% zcUqKv-z9l?&Vx*kxE?<4-`#@fyPS>LM*mVy&GPrqs;+viilm_{=(v~8v^r3 zi>3y8*7+VD4tX+R{xHmJYwyT*+nT+ctH=NSi3QbA*C4g0)8FSxTE}gu_fI7?Uwd87 z<4O_|I0h%t48WnF`2v0yV0_hm#v_vk?c>lYCa!ogLCQuSW5T4tya_i~Vn zZx;pCQonffyIjvq!@5xyMEU;B9CM4W>KRkk!too8w`iAA*fj3+Op@nZR-@r%&z;{5 z!pkhP{M>h*H`lGApq%y&9M+hiDvCQ199y|@oXApwqmQd5`_p`XP9d7tf$*Z~*4#+R zQIuabsw@Q7?~i3Zx4|j=%?fp$GL0JkCE?Ot*kopn5dd}MK0^Li2H1%#g5p8;;!8w1lB(df-4)Y%GhL)#hbmSgmo_$O))Gwsk{dZ&TE7ceVB zlrUvk`ZCwujTy$VNCd((Gg3ar*mVU5KJv2 z&t;}&e){czmtIV`=^#>0o-LKDg+0rf#aI7_Z{SHxPMZOXAtC#LVimJmt6Qr)w+At~ zHzjqv^9a6yp{#HyQui^xjXHJ!ztP0f=#jV^Wt`ZLexZ1nvPQ;EK;hv%|5)ZA8rWAu4o*de|vzp>>bny6M8=Rk&{ToBXFAx4F*PyO= z+DSJ>v>o%PM*q`?$KpsFGuM_kXg}iC{O*3jYx-pJXcO_P{jO0PP-{Q&M`P5s`e+oL z2p5O3H6_51Wge~?Sb0krQtYKsNh(@2gG5<0p(G7fp!d_(M32d=C?7>cDC19Cv_P}^ z$P=!H5BcN`Sts(%SukQ_V;3Fs+q)xs)oZs;?P6Kl zT-m?D9`ZC=(`@s3Kb^HSGHNi|MP{12MKWcZj?w!@3lN3lVNmRrLzg#MJiAD(ZTd>K zy_(zkvS|cLMT88bMnxTtme}#JAa7+T8XILC*}h!d6@RkR{y+is?4q5~bn^N0yVMZr zL^rkk3FFNBuQYxTh}XSgbw{KbhivNny#@754NnwGCXJcc&>Y!&8_uBa2{LRejyHSv z={}LW*sLL)h8hQhkKcpW#t^Zz>$@&K(V&JGb=y=cBmBKqj`@&QEra}pE5mpMyW+M1 z^6qmelWI!wz;Y4t=HM#>&&}I$>&yr-IQ~D;xyyQg*-`piWoa(o)TyBcw<~j zr9`*=IFgt({nIL+Ht^svS(hS;s@g@Qt||>$%KU99wul_r#?nf{)9ZKBDGqUbyMuo- zUzKg9Yh>0B#t9?!IX-2rn;Z!$IWtYBN~_10RbJA+@iQ_RsC;5t7nhn?{S)+8msqaz z`5cV4md#S5Tf&?y+j?*KDsm_-b5?2V9oU<`8>tVv&zo(D3x4_Quk z)t~lMb-w5W%4+>6L+`j~mvQt!NZ1dpBHBg#MRfElDKra(Z}3es58z60D%ej)ijmgw zX<$Y|MyU|ZC<1&^c&I%iffZ%jG}%)c2~PgH)ZwHMKKk)FD^c$Q-lf8Asd8%%2co=5 z%%xd(4P@ylQ;a^+i7(%xuew6^mQU&=cWIi8zN;(*;0AR`yaY58R4t0>XyzJOKmK~y zDks2yLN0r44WGT_C%ca}F$jywM)1|U6dqeL#)whJ+I|LD{bF^S2{$!&MkkmllDAXf}napNjV}3G7G~Qpy(|J#7yF(DpAg+g3^@i@eGsCOR zL-n(_K$Psy30zbp?gW*;4^w)iv_nYN5v>BhHRM0!X