diff --git a/salt/common/tools/sbin/so-image-common b/salt/common/tools/sbin/so-image-common
index e8f604681..833b9a7d8 100755
--- a/salt/common/tools/sbin/so-image-common
+++ b/salt/common/tools/sbin/so-image-common
@@ -164,8 +164,8 @@ update_docker_containers() {
       # Pull down the trusted docker image
       run_check_net_err \
         "docker pull $CONTAINER_REGISTRY/$IMAGEREPO/$image" \
-        "Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1 
-
+        "Could not pull $image, please ensure connectivity to $CONTAINER_REGISTRY" >> "$LOG_FILE" 2>&1
+
       # Get signature
       run_check_net_err \
         "curl --retry 5 --retry-delay 60 -A '$CURLTYPE/$CURRENTVERSION/$OS/$(uname -r)' $sig_url --output $SIGNPATH/$image.sig" \
@@ -189,11 +189,24 @@ update_docker_containers() {
         HOSTNAME=$(hostname)
       fi
       docker tag $CONTAINER_REGISTRY/$IMAGEREPO/$image $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 || {
-        echo "Unable to tag $image" >> "$LOG_FILE" 2>&1 
+        echo "Unable to tag $image" >> "$LOG_FILE" 2>&1
         exit 1
       }
-      docker push $HOSTNAME:5000/$IMAGEREPO/$image >> "$LOG_FILE" 2>&1 || {
-        echo "Unable to push $image" >> "$LOG_FILE" 2>&1
+      # Push to the embedded registry via a registry-to-registry copy. Avoids
+      # `docker push`, which on Docker 29.x with the containerd image store
+      # represents freshly-pulled images as an index whose layer content
+      # isn't reachable through the push path. The local `docker tag` above
+      # is preserved so so-image-pull's `:5000` existence check still works.
+      # Pin to the digest already gpg-verified above so we copy exactly the
+      # bytes we approved.
+      local VERIFIED_REF
+      VERIFIED_REF=$(echo "$DOCKERINSPECT" | jq -r ".[0].RepoDigests[] | select(. | contains(\"$CONTAINER_REGISTRY\"))" | head -n 1)
+      if [ -z "$VERIFIED_REF" ] || [ "$VERIFIED_REF" = "null" ]; then
+        echo "Unable to determine verified digest for $image" >> "$LOG_FILE" 2>&1
+        exit 1
+      fi
+      docker buildx imagetools create --tag $HOSTNAME:5000/$IMAGEREPO/$image "$VERIFIED_REF" >> "$LOG_FILE" 2>&1 || {
+        echo "Unable to copy $image to embedded registry" >> "$LOG_FILE" 2>&1
         exit 1
       }
     fi
diff --git a/salt/common/tools/sbin/so-log-check b/salt/common/tools/sbin/so-log-check
index f355e1bfe..a3d9c51d0 100755
--- a/salt/common/tools/sbin/so-log-check
+++ b/salt/common/tools/sbin/so-log-check
@@ -227,7 +227,7 @@ if [[ $EXCLUDE_KNOWN_ERRORS == 'Y' ]]; then
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|from NIC checksum offloading" # zeek reporter.log
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|marked for removal" # docker container getting recycled
   EXCLUDED_ERRORS="$EXCLUDED_ERRORS|tcp 127.0.0.1:6791: bind: address already in use" # so-elastic-fleet agent restarting. Seen starting w/ 8.18.8 https://github.com/elastic/kibana/issues/201459
-  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint|armis|o365_metrics|microsoft_sentinel|snyk).*user so_kibana lacks the required permissions \[(logs|metrics)-\1" # Known issue with integrations starting transform jobs that are explicitly not allowed to start as a system user. (installed as so_elastic / so_kibana)
+  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|TransformTask\] \[logs-(tychon|aws_billing|microsoft_defender_endpoint|armis|o365_metrics|microsoft_sentinel|snyk|cyera|island_browser).*user so_kibana lacks the required permissions \[(logs|metrics)-\1" # Known issue with integrations starting transform jobs that are explicitly not allowed to start as a system user. This error should not be seen on fresh ES 9.3.3 installs or after SO 3.1.0 with soup's addition of check_transform_health_and_reauthorize()
  EXCLUDED_ERRORS="$EXCLUDED_ERRORS|manifest unknown" # appears in so-dockerregistry log for so-tcpreplay following docker upgrade to 29.2.1-1
 fi
 
diff --git a/salt/elastic-fleet-package-registry/enabled.sls b/salt/elastic-fleet-package-registry/enabled.sls
index e2833f5be..a5a8b9d22 100644
--- a/salt/elastic-fleet-package-registry/enabled.sls
+++ b/salt/elastic-fleet-package-registry/enabled.sls
@@ -51,6 +51,16 @@ so-elastic-fleet-package-registry:
       - {{ ULIMIT.name }}={{ ULIMIT.soft }}:{{ ULIMIT.hard }}
     {% endfor %}
     {% endif %}
+
+wait_for_so-elastic-fleet-package-registry:
+  http.wait_for_successful_query:
+    - name: "http://localhost:8080/health"
+    - status: 200
+    - wait_for: 300
+    - request_interval: 15
+    - require:
+      - docker_container: so-elastic-fleet-package-registry
+
 delete_so-elastic-fleet-package-registry_so-status.disabled:
   file.uncomment:
     - name: /opt/so/conf/so-status/so-status.conf
diff --git a/salt/elasticfleet/manager.sls b/salt/elasticfleet/manager.sls
index 00fead9cf..1728f2010 100644
--- a/salt/elasticfleet/manager.sls
+++ b/salt/elasticfleet/manager.sls
@@ -18,17 +18,6 @@ so-elastic-fleet-auto-configure-logstash-outputs:
     - retry:
         attempts: 4
         interval: 30
-
-{# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #}
-so-elastic-fleet-auto-configure-logstash-outputs-force:
-  cmd.run:
-    - name: /usr/sbin/so-elastic-fleet-outputs-update --certs
-    - retry:
-        attempts: 4
-        interval: 30
-    - onchanges:
-      - x509: etc_elasticfleet_logstash_crt
-      - x509: elasticfleet_kafka_crt
 {% endif %}
 
 # If enabled, automatically update Fleet Server URLs & ES Connection
diff --git a/salt/elasticfleet/tools/sbin/so-elastic-fleet-common b/salt/elasticfleet/tools/sbin/so-elastic-fleet-common
index 92532082a..91fa787f2 100644
--- a/salt/elasticfleet/tools/sbin/so-elastic-fleet-common
+++ b/salt/elasticfleet/tools/sbin/so-elastic-fleet-common
@@ -240,7 +240,7 @@ elastic_fleet_policy_create() {
     --arg DESC "$DESC" \
     --arg TIMEOUT $TIMEOUT \
     --arg FLEETSERVER "$FLEETSERVER" \
-    '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER}'
+    '{"name": $NAME,"id":$NAME,"description":$DESC,"namespace":"default","monitoring_enabled":["logs"],"inactivity_timeout":$TIMEOUT,"has_fleet_server":$FLEETSERVER,"advanced_settings":{"agent_logging_level": "warning"}}'
   )
   # Create Fleet Policy
   if ! fleet_api "agent_policies" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update
index f045bf753..8630799d8 100644
--- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update
+++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update
@@ -235,6 +235,16 @@ function update_kafka_outputs() {
 
 {% endif %}
 
+# Compare the current Elastic Fleet certificate against what is on disk
+POLICY_CERT_SHA=$(jq -r '.item.ssl.certificate' <<< $RAW_JSON | openssl x509 -noout -sha256 -fingerprint)
+DISK_CERT_SHA=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt -noout -sha256 -fingerprint)
+
+if [[ "$POLICY_CERT_SHA" != "$DISK_CERT_SHA" ]]; then
+  printf "Certificate on disk doesn't match certificate in policy - forcing update\n"
+  UPDATE_CERTS=true
+  FORCE_UPDATE=true
+fi
+
 # Sort & hash the new list of Logstash Outputs
 NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
 NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
diff --git a/salt/firewall/defaults.yaml b/salt/firewall/defaults.yaml
index e9c82401d..5c1229787 100644
--- a/salt/firewall/defaults.yaml
+++ b/salt/firewall/defaults.yaml
@@ -398,6 +398,7 @@ firewall:
                 - elasticsearch_rest
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -410,6 +411,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -427,6 +429,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
             searchnode:
               portgroups:
@@ -437,6 +440,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -450,6 +454,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -459,6 +464,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -492,6 +498,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - elastic_agent_control
@@ -502,6 +509,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -610,6 +618,7 @@ firewall:
                 - elasticsearch_rest
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -622,6 +631,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -639,6 +649,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
             searchnode:
              portgroups:
@@ -649,6 +660,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -662,6 +674,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -671,6 +684,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -702,6 +716,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - elastic_agent_control
@@ -712,6 +727,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -820,6 +836,7 @@ firewall:
                 - elasticsearch_rest
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -832,6 +849,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -849,6 +867,7 @@
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
             searchnode:
               portgroups:
@@ -858,6 +877,7 @@
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -870,6 +890,7 @@
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -879,6 +900,7 @@
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -912,6 +934,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - elastic_agent_control
@@ -922,6 +945,7 @@
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -1040,6 +1064,7 @@ firewall:
                 - elasticsearch_rest
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -1052,6 +1077,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -1063,6 +1089,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - beats_5044
@@ -1074,6 +1101,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - redis
@@ -1083,6 +1111,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - redis
@@ -1093,6 +1122,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -1129,6 +1159,7 @@ firewall:
               portgroups:
                 - docker_registry
                 - influxdb
+                - postgres
                 - sensoroni
                 - yum
                 - elastic_agent_control
@@ -1139,6 +1170,7 @@ firewall:
                 - yum
                 - docker_registry
                 - influxdb
+                - postgres
                 - elastic_agent_control
                 - elastic_agent_data
                 - elastic_agent_update
@@ -1482,6 +1514,7 @@ firewall:
                 - kibana
                 - redis
                 - influxdb
+                - postgres
                 - elasticsearch_rest
                 - elasticsearch_node
                 - elastic_agent_control
diff --git a/salt/firewall/map.jinja b/salt/firewall/map.jinja
index 61f8215b8..58d8c189d 100644
--- a/salt/firewall/map.jinja
+++ b/salt/firewall/map.jinja
@@ -1,6 +1,5 @@
 {% from 'vars/globals.map.jinja' import GLOBALS %}
 {% from 'docker/docker.map.jinja' import DOCKERMERGED %}
-{% from 'telegraf/map.jinja' import TELEGRAFMERGED %}
 {% import_yaml 'firewall/defaults.yaml' as FIREWALL_DEFAULT %}
 
 {# add our ip to self #}
@@ -56,16 +55,4 @@
 
 {% endif %}
 
-{# Open Postgres (5432) to minion hostgroups when Telegraf is configured to write to Postgres #}
-{% set TG_OUT = TELEGRAFMERGED.output | upper %}
-{% if TG_OUT in ['POSTGRES', 'BOTH'] %}
-{% if role.startswith('manager') or role == 'standalone' or role == 'eval' %}
-{% for r in ['sensor', 'searchnode', 'heavynode', 'receiver', 'fleet', 'idh', 'desktop', 'import'] %}
-{% if FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r] is defined %}
-{% do FIREWALL_DEFAULT.firewall.role[role].chain["DOCKER-USER"].hostgroups[r].portgroups.append('postgres') %}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% endif %}
-
 {% set FIREWALL_MERGED = salt['pillar.get']('firewall', FIREWALL_DEFAULT.firewall, merge=True) %}
diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup
index a838e3275..c0f8b61c1 100755
--- a/salt/manager/tools/sbin/soup
+++ b/salt/manager/tools/sbin/soup
@@ -485,6 +485,130 @@
 elasticsearch_backup_index_templates() {
   tar -czf /nsm/backup/3.0.0_elasticsearch_index_templates.tar.gz -C /opt/so/conf/elasticsearch/templates/index/ .
 }
+elasticfleet_set_agent_logging_level_warn() {
+  . /usr/sbin/so-elastic-fleet-common
+
+  local current_agent_policies
+  if ! current_agent_policies=$(fleet_api "agent_policies?perPage=1000"); then
+    echo "Warning: unable to retrieve Fleet agent policies"
+    return 0
+  fi
+
+  # Only updating policies that are within Security Onion defaults and do not already have any user configured advanced_settings.
+  local policies_to_update
+  policies_to_update=$(jq -c '
+    .items[]
+    | select(has("advanced_settings") | not)
+    | select(
+        .id == "so-grid-nodes_general"
+        or .id == "so-grid-nodes_heavy"
+        or .id == "endpoints-initial"
+        or (.id | startswith("FleetServer_"))
+      )
+  ' <<< "$current_agent_policies")
+
+  if [[ -z "$policies_to_update" ]]; then
+    return 0
+  fi
+
+  while IFS= read -r policy; do
+    [[ -z "$policy" ]] && continue
+
+    local policy_id policy_name policy_namespace
+    policy_id=$(jq -r '.id' <<< "$policy")
+    policy_name=$(jq -r '.name' <<< "$policy")
+    policy_namespace=$(jq -r '.namespace' <<< "$policy")
+
+    local update_logging
+    update_logging=$(jq -n \
+      --arg name "$policy_name" \
+      --arg namespace "$policy_namespace" \
+      '{name: $name, namespace: $namespace, advanced_settings: {agent_logging_level: "warning"}}'
+    )
+
+    echo "Setting elastic agent_logging_level to warning on policy '$policy_name' ($policy_id)."
+    if ! fleet_api "agent_policies/$policy_id" -XPUT -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$update_logging" >/dev/null; then
+      echo " warning: failed to update agent policy '$policy_name' ($policy_id)" >&2
+    fi
+  done <<< "$policies_to_update"
+}
+
+check_transform_health_and_reauthorize() {
+  . /usr/sbin/so-elastic-fleet-common
+
+  echo "Checking integration transform jobs for unhealthy / unauthorized status..."
+
+  local transforms_doc stats_doc installed_doc
+  if ! transforms_doc=$(so-elasticsearch-query "_transform/_all?size=1000" --fail --retry 3 --retry-delay 5 2>/dev/null); then
+    echo "Unable to query for transform jobs, skipping reauthorization."
+    return 0
+  fi
+  if ! stats_doc=$(so-elasticsearch-query "_transform/_all/_stats?size=1000" --fail --retry 3 --retry-delay 5 2>/dev/null); then
+    echo "Unable to query for transform job stats, skipping reauthorization."
+    return 0
+  fi
+  if ! installed_doc=$(fleet_api "epm/packages/installed?perPage=500"); then
+    echo "Unable to list installed Fleet packages, skipping reauthorization."
+    return 0
+  fi
+
+  # Get all transforms that meet the following
+  # - unhealthy (any non-green health status)
+  # - metadata has run_as_kibana_system: false (this fix is specific to transforms started prior to Kibana 9.3.3)
+  # - are not orphaned (integration is not somehow missing/corrupt/uninstalled)
+  local unhealthy_transforms
+  unhealthy_transforms=$(jq -c -n \
+    --argjson t "$transforms_doc" \
+    --argjson s "$stats_doc" \
+    --argjson i "$installed_doc" '
+      ($i.items | map({key: .name, value: .version}) | from_entries) as $pkg_ver
+      | ($s.transforms | map({key: .id, value: .health.status}) | from_entries) as $health
+      | [ $t.transforms[]
+          | select(._meta.run_as_kibana_system == false)
+          | select(($health[.id] // "unknown") != "green")
+          | {id, pkg: ._meta.package.name, ver: ($pkg_ver[._meta.package.name])}
+        ]
+      | if length == 0 then empty else . end
+      | (map(select(.ver == null)) | map({orphan: .id})[]),
+        (map(select(.ver != null))
+          | group_by(.pkg)
+          | map({pkg: .[0].pkg, ver: .[0].ver, transformIds: map(.id)})[])
+    ')
+
+  if [[ -z "$unhealthy_transforms" ]]; then
+    return 0
+  fi
+
+  local unhealthy_count
+  unhealthy_count=$(jq -s '[.[].transformIds? // empty | .[]] | length' <<< "$unhealthy_transforms")
+  echo "Found $unhealthy_count transform(s) needing reauthorization."
+
+  local total_failures=0
+  while IFS= read -r transform; do
+    [[ -z "$transform" ]] && continue
+    if jq -e 'has("orphan")' <<< "$transform" >/dev/null 2>&1; then
+      echo "Skipping transform not owned by any installed Fleet package: $(jq -r '.orphan' <<< "$transform")"
+      continue
+    fi
+
+    local pkg ver body resp
+    pkg=$(jq -r '.pkg' <<< "$transform")
+    ver=$(jq -r '.ver' <<< "$transform")
+    body=$(jq -c '{transforms: (.transformIds | map({transformId: .}))}' <<< "$transform")
+
+    echo "Reauthorizing transform(s) for ${pkg}-${ver}..."
+    resp=$(fleet_api "epm/packages/${pkg}/${ver}/transforms/authorize" \
+      -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
+      -d "$body") || { echo "Could not reauthorize transform(s) for ${pkg}-${ver}"; continue; }
+
+    (( total_failures += $(jq 'map(select(.success != true)) | length' <<< "$resp" 2>/dev/null) ))
+  done <<< "$unhealthy_transforms"
+
+  if [[ "$total_failures" -gt 0 ]]; then
+    echo "Some transform(s) failed to reauthorize."
+  fi
+}
+
 ensure_postgres_local_pillar() {
   # Postgres was added as a service after 3.0.0, so the new pillar/top.sls
   # references postgres.soc_postgres / postgres.adv_postgres unconditionally.
@@ -553,6 +677,12 @@ post_to_3.1.0() {
   # file_roots of its own and --local would fail with "No matching sls found".
   salt-call state.apply postgres.telegraf_users queue=True || true
 
+  # Update default agent policies to use logging level warn.
+  elasticfleet_set_agent_logging_level_warn || true
+
+  # Check for unhealthy / unauthorized integration transform jobs and attempt reauthorizations
+  check_transform_health_and_reauthorize || true
+
   POSTVERSION=3.1.0
 }
 
diff --git a/salt/soc/defaults.map.jinja b/salt/soc/defaults.map.jinja
index 00a9604f6..2821bb8e5 100644
--- a/salt/soc/defaults.map.jinja
+++ b/salt/soc/defaults.map.jinja
@@ -24,11 +24,6 @@
 
 {% do SOCDEFAULTS.soc.config.server.modules.elastic.update({'username': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'password': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass}) %}
 
-{% if GLOBALS.postgres is defined and GLOBALS.postgres.auth is defined %}
-{% set PG_ADMIN_PASS = salt['pillar.get']('secrets:postgres_pass', '') %}
-{% do SOCDEFAULTS.soc.config.server.modules.update({'postgres': {'hostUrl': GLOBALS.manager_ip, 'port': 5432, 'username': GLOBALS.postgres.auth.users.so_postgres_user.user, 'password': GLOBALS.postgres.auth.users.so_postgres_user.pass, 'adminUser': 'postgres', 'adminPassword': PG_ADMIN_PASS, 'dbname': 'securityonion', 'sslMode': 'require', 'assistantEnabled': true, 'esHostUrl': 'https://' ~ GLOBALS.manager_ip ~ ':9200', 'esUsername': GLOBALS.elasticsearch.auth.users.so_elastic_user.user, 'esPassword': GLOBALS.elasticsearch.auth.users.so_elastic_user.pass, 'esVerifyCert': false}}) %}
-{% endif %}
-
 {% do SOCDEFAULTS.soc.config.server.modules.influxdb.update({'hostUrl': 'https://' ~ GLOBALS.influxdb_host ~ ':8086'}) %}
 {% do SOCDEFAULTS.soc.config.server.modules.influxdb.update({'token': INFLUXDB_TOKEN}) %}
 {% for tool in SOCDEFAULTS.soc.config.server.client.tools %}
diff --git a/salt/strelka/filecheck/filecheck b/salt/strelka/filecheck/filecheck
index 758248083..35b47ce71 100644
--- a/salt/strelka/filecheck/filecheck
+++ b/salt/strelka/filecheck/filecheck
@@ -15,7 +15,7 @@ from watchdog.observers import Observer
 from watchdog.events import FileSystemEventHandler
 
 with open("/opt/so/conf/strelka/filecheck.yaml", "r") as ymlfile:
-    cfg = yaml.load(ymlfile, Loader=yaml.Loader)
+    cfg = yaml.safe_load(ymlfile)
 
 extract_path = cfg["filecheck"]["extract_path"]
 historypath = cfg["filecheck"]["historypath"]
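
Reviewer note, not part of the patch: a minimal sketch of how the digest-pinned registry copy introduced in so-image-common could be spot-checked on a manager after so-image-common runs. The image name and tag below are hypothetical placeholders, and CONTAINER_REGISTRY / IMAGEREPO are assumed to be set in the shell the same way so-image-common sets them internally; this is an illustration only, not an official verification step.

    # Hypothetical spot check: compare the upstream (gpg-verified) manifest digest
    # with the digest now served by the embedded registry on :5000.
    SRC="$CONTAINER_REGISTRY/$IMAGEREPO/so-nginx:2.4.200"   # placeholder image:tag
    DST="$(hostname):5000/$IMAGEREPO/so-nginx:2.4.200"
    docker buildx imagetools inspect "$SRC" | grep -m1 'Digest:'
    docker buildx imagetools inspect "$DST" | grep -m1 'Digest:'
    # Matching Digest lines are a quick sanity check that the copy delivered the approved bytes.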