mirror of
https://github.com/Security-Onion-Solutions/securityonion.git
synced 2025-12-06 09:12:45 +01:00
250 lines
11 KiB
Plaintext
Executable File
250 lines
11 KiB
Plaintext
Executable File
|
|
#!/bin/bash

# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.

{% from 'vars/globals.map.jinja' import GLOBALS %}

# Intermediate CA certificate for the grid; used below to compute the
# Elasticsearch CA fingerprint and as the CA for the Logstash output.
INTCA=/etc/pki/tls/certs/intca.crt

# Shared helpers — presumably provides status() and wait_for_web_response(); confirm in so-common.
. /usr/sbin/so-common
# Fleet helpers — presumably provides fleet_api(), elastic_fleet_policy_create(),
# and elastic_fleet_integration_create(); confirm in so-elastic-fleet-common.
. /usr/sbin/so-elastic-fleet-common
# Deleting Elastic Fleet data...

# Gate on Elasticsearch readiness: poll (up to 300s) until the .kibana*
# indices report "green open"; abort setup entirely if that never happens.
if ! wait_for_web_response "https://localhost:9200/_cat/indices/.kibana*" "green open" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"; then
  status "Elasticsearch not accessible, exiting Elastic Fleet setup..."
  exit 1
fi
ALIASES=(.fleet-servers .fleet-policies-leader .fleet-policies .fleet-agents .fleet-artifacts .fleet-enrollment-api-keys .kibana_ingest)

# Wipe any pre-existing Fleet state: resolve each alias to its backing
# concrete indices and delete them one at a time.
for ALIAS in "${ALIASES[@]}"; do
  # --fail turns a missing alias into a non-zero exit, so absent aliases are skipped silently.
  if INDXS_RAW=$(curl -sK /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/_resolve/index/${ALIAS}" --fail 2>/dev/null); then
    # Iterate the resolved index names, one per line of jq output.
    while IFS= read -r INDX; do
      [ -n "$INDX" ] || continue
      status "Deleting $INDX"
      curl -K /opt/so/conf/kibana/curl.config -s -k -L -H "Content-Type: application/json" "https://localhost:9200/${INDX}" -XDELETE
    done < <(echo "$INDXS_RAW" | jq -r '.aliases[].indices[]')
  fi
done
# Restarting Kibana...
so-kibana-restart

# Gate on the Kibana Fleet API being up & ready (poll up to 300s).
if ! wait_for_web_response "http://localhost:5601/api/fleet/settings" "fleet" 300 "curl -K /opt/so/conf/elasticsearch/curl.config"; then
  # Fix: the original printf had no trailing newline, so the message glued
  # onto whatever the caller printed next.
  printf "Kibana API not accessible, exiting Elastic Fleet setup...\n"
  exit 1
fi
printf "\n### Create ES Token ###\n"
# Request a Fleet service token from the Kibana Fleet API; the token value is
# persisted to the minion pillar at the end of this script.
if ! ESTOKEN_RAW=$(fleet_api "service_tokens" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  echo -e "\nFailed to create ES token..."
  exit 1
fi
ESTOKEN=$(echo "$ESTOKEN_RAW" | jq -r .value)
### Create Outputs, Fleet Policy and Fleet URLs ###

# Create the Manager Elasticsearch Output first and set it as the default output
printf "\nAdd Manager Elasticsearch Output...\n"

# Uppercase hex SHA-256 of the DER-encoded intermediate CA — the
# ca_trusted_fingerprint agents use to pin the Elasticsearch TLS chain.
ESCACRT=$(openssl x509 -in "$INTCA" -outform DER | sha256sum | cut -d' ' -f1 | tr '[:lower:]' '[:upper:]')
JSON_STRING=$(jq -n \
  --arg ESCACRT "$ESCACRT" \
  '{"name":"so-manager_elasticsearch","id":"so-manager_elasticsearch","type":"elasticsearch","hosts":["https://{{ GLOBALS.manager_ip }}:9200","https://{{ GLOBALS.manager }}:9200"],"is_default":false,"is_default_monitoring":false,"config_yaml":"","ca_trusted_fingerprint": $ESCACRT}')

# Create the output disabled (is_default=false); it is promoted to the default below.
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  # Fix: the original message referred to a non-existent "so-elasticsearch_manager policy";
  # this call creates the so-manager_elasticsearch output.
  echo -e "\nFailed to create so-manager_elasticsearch output..."
  exit 1
fi
printf "\n\n"
# so-manager_elasticsearch should exist and be disabled. Now update it before checking its the only default policy
# Promote the output to the default for both data and monitoring; the id is
# dropped from the body because the PUT carries it in the URL.
MANAGER_OUTPUT_ENABLED=$(echo "$JSON_STRING" | jq 'del(.id) | .is_default = true | .is_default_monitoring = true')
# NOTE(review): this talks to Kibana directly instead of going through
# fleet_api(), and uses the elasticsearch curl.config — presumably fleet_api
# does not support PUT; confirm against so-elastic-fleet-common.
if ! curl -sK /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_elasticsearch" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$MANAGER_OUTPUT_ENABLED"; then
  echo -e "\n failed to update so-manager_elasticsearch"
  exit 1
fi
# At this point there should only be two policies. fleet-default-output & so-manager_elasticsearch
status "Verifying so-manager_elasticsearch policy is configured as the current default"

# Inspect fleet-default-output rather than so-manager_elasticsearch: a weird
# state can exist where both outputs are flagged as the active default for
# logs/metrics, which stops logs ingesting on import/eval nodes.
# fleet-default-output is created automatically by Fleet when it starts, so it
# should exist on any installation type.
if ! DEFAULTPOLICY=$(fleet_api "outputs/fleet-default-output"); then
  echo -e "\nDefault fleet-default-output policy doesn't exist...\n"
  exit 1
fi

fleet_default=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default')
fleet_default_monitoring=$(echo "$DEFAULTPOLICY" | jq -er '.item.is_default_monitoring')

# fleet-default-output must not be the default for either data or monitoring
# (both flags read back as the string "false").
if [[ $fleet_default == "false" && $fleet_default_monitoring == "false" ]]; then
  echo -e "\nso-manager_elasticsearch is configured as the current default policy..."
else
  echo -e "\nVerification of so-manager_elasticsearch policy failed... The default 'fleet-default-output' output is still active..."
  exit 1
fi
# Create the Manager Fleet Server Host Agent Policy.
# This has to be done while the Elasticsearch Output is still the default Output.
printf "Create Manager Fleet Server Policy...\n"
if ! elastic_fleet_policy_create "FleetServer_{{ GLOBALS.hostname }}" "Fleet Server - {{ GLOBALS.hostname }}" "false" "120"; then
  echo -e "\n Failed to create Manager fleet server policy..."
  exit 1
fi

# Rewrite the stock Fleet Server integration policy so its policy_id and name
# carry this manager's hostname.
UPDATED_INTEGRATION_POLICY=$(
  jq --arg policy_id "FleetServer_{{ GLOBALS.hostname }}" \
     --arg name "fleet_server-{{ GLOBALS.hostname }}" \
     '.policy_id = $policy_id | .name = $name' \
     /opt/so/conf/elastic-fleet/integrations/fleet-server/fleet-server.json
)

# Attach the Fleet Server integration to the newly created Fleet policy.
if ! elastic_fleet_integration_create "$UPDATED_INTEGRATION_POLICY"; then
  echo -e "\nFailed to create Fleet server integration for Manager.."
  exit 1
fi
# Now we can create the Logstash Output and set it to be the default Output
printf "\n\nCreate Logstash Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
# PEM-encoded client certificate/key agents use to ship to grid Logstash,
# plus the CA that signed the Logstash listener's certificate.
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
# Fix: quote "$INTCA" like the earlier openssl invocation — defensive against
# word-splitting should the path ever contain whitespace.
LOGSTASHCA=$(openssl x509 -in "$INTCA")
JSON_STRING=$( jq -n \
  --arg LOGSTASHCRT "$LOGSTASHCRT" \
  --arg LOGSTASHKEY "$LOGSTASHKEY" \
  --arg LOGSTASHCA "$LOGSTASHCA" \
  '{"name":"grid-logstash","is_default":true,"is_default_monitoring":true,"id":"so-manager_logstash","type":"logstash","hosts":["{{ GLOBALS.manager_ip }}:5055", "{{ GLOBALS.manager }}:5055"],"config_yaml":"","ssl":{"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets":{"ssl":{"key": $LOGSTASHKEY }},"proxy_id":null}'
)
if ! fleet_api "outputs" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to create logstash fleet output"
  exit 1
fi
printf "\n\n"
{%- endif %}

printf "\nCreate Kafka Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
/usr/sbin/so-kafka-fleet-output-policy
{% endif %}
# Add Manager Hostname & URL Base to Fleet Host URLs
printf "\nAdd SO-Manager Fleet URL\n"
# Advertise one Fleet Server URL when hostname and URL base are the same,
# otherwise advertise both so agents can reach either name.
if [ "{{ GLOBALS.hostname }}" = "{{ GLOBALS.url_base }}" ]; then
  JSON_STRING=$( jq -n '{"id":"grid-default","name":"grid-default","is_default":true,"host_urls":["https://{{ GLOBALS.url_base }}:8220"]}')
else
  JSON_STRING=$( jq -n '{"id":"grid-default","name":"grid-default","is_default":true,"host_urls":["https://{{ GLOBALS.url_base }}:8220", "https://{{ GLOBALS.hostname }}:8220"]}')
fi

## This array replaces whatever URLs are currently configured
if ! fleet_api "fleet_server_hosts" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to add manager fleet URL"
  exit 1
fi
printf "\n\n"
### Create Policies & Associated Integration Configuration ###

# Load packages
# NOTE(review): exit statuses of these two loaders are not checked — a failure
# here does not stop setup; confirm that is intentional.
/usr/sbin/so-elastic-fleet-package-load

# Load Elasticsearch templates
/usr/sbin/so-elasticsearch-templates-load
# Create the three default agent policies. The original repeated the same
# if/echo/exit block three times; drive it from a single "id|description" list
# instead. The trailing "false" "1209600" arguments are passed through
# unchanged (1209600s = 14 days — presumably an unenroll/inactivity timeout;
# confirm against elastic_fleet_policy_create).
DEFAULT_POLICIES=(
  "endpoints-initial|Initial Endpoint Policy"
  "so-grid-nodes_general|SO Grid Nodes - General Purpose"
  "so-grid-nodes_heavy|SO Grid Nodes - Heavy Node"
)
for POLICY_SPEC in "${DEFAULT_POLICIES[@]}"; do
  POLICY_ID=${POLICY_SPEC%%|*}
  POLICY_DESC=${POLICY_SPEC#*|}
  if ! elastic_fleet_policy_create "$POLICY_ID" "$POLICY_DESC" "false" "1209600"; then
    echo -e "\nFailed to create ${POLICY_ID} policy..."
    exit 1
  fi
done

# Load Integrations for default policies
so-elastic-fleet-integration-policy-load
# Set Elastic Agent Artifact Registry URL
# Point agents at the grid-local artifact registry instead of Elastic's
# public download site.
JSON_STRING=$(
  jq -n \
    --arg NAME "FleetServer_{{ GLOBALS.hostname }}" \
    --arg URL "http://{{ GLOBALS.url_base }}:8443/artifacts/" \
    '{"name":$NAME,"host":$URL,"is_default":true}'
)

if ! fleet_api "agent_download_sources" -XPOST -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"; then
  echo -e "\nFailed to update Elastic Agent artifact URL"
  exit 1
fi
### Finalization ###

# Query for the enrollment tokens of the default policies. The original made
# three identical enrollment_api_keys API calls; fetch the list once and
# extract each policy's token from the same response.
if ! ENROLLMENTTOKENS_RAW=$(fleet_api "enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json'); then
  # Matches the first failure message the original three-call sequence would
  # have emitted before exiting.
  echo -e "\nFailed to query for Endpoints enrollment token"
  exit 1
fi

# NOTE(review): contains() is a substring match, so a policy_id that merely
# embeds one of these names would also match; kept to preserve behavior —
# `==` would be stricter if ids are always exact.
ENDPOINTSENROLLMENTOKEN=$(echo "$ENROLLMENTTOKENS_RAW" | jq -r -c '.list[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
GRIDNODESENROLLMENTOKENGENERAL=$(echo "$ENROLLMENTTOKENS_RAW" | jq -r -c '.list[] | select(.policy_id | contains("so-grid-nodes_general")) | .api_key')
GRIDNODESENROLLMENTOKENHEAVY=$(echo "$ENROLLMENTTOKENS_RAW" | jq -r -c '.list[] | select(.policy_id | contains("so-grid-nodes_heavy")) | .api_key')
# Store needed data in minion pillar
pillar_file=/opt/so/saltstack/local/pillar/minions/{{ GLOBALS.minion_id }}.sls
cat <<EOF >> "$pillar_file"
elasticfleet:
  enabled: True
  config:
    server:
      es_token: '$ESTOKEN'
      endpoints_enrollment: '$ENDPOINTSENROLLMENTOKEN'
      grid_enrollment_general: '$GRIDNODESENROLLMENTOKENGENERAL'
      grid_enrollment_heavy: '$GRIDNODESENROLLMENTOKENHEAVY'

EOF

#Store Grid Nodes Enrollment token in Global pillar
# (appended with leading indentation — presumably nested under an existing
# section of soc_global.sls; indentation must match that file's layout)
global_pillar_file=/opt/so/saltstack/local/pillar/global/soc_global.sls
cat <<EOF >> "$global_pillar_file"
  fleet_grid_enrollment_token_general: '$GRIDNODESENROLLMENTOKENGENERAL'
  fleet_grid_enrollment_token_heavy: '$GRIDNODESENROLLMENTOKENHEAVY'

EOF
# Call Elastic-Fleet Salt State
# queue=True makes salt wait for any in-progress state run instead of failing.
salt-call state.apply elasticfleet queue=True

# Generate installers & install Elastic Agent on the node
so-elastic-agent-gen-installers
salt-call state.apply elasticfleet.install_agent_grid queue=True

# Explicit success — earlier failure paths all exit 1.
exit 0