Merge pull request #10680 from Security-Onion-Solutions/2.4/fleetautogen

Automatically manage Fleet Logstash Config
Josh Brower
2023-07-10 16:26:20 -04:00
committed by GitHub
16 changed files with 320 additions and 135 deletions

View File

@@ -2,7 +2,7 @@
{% set cached_grains = salt.saltutil.runner('cache.grains', tgt='*') %}
{% for minionid, ip in salt.saltutil.runner(
'mine.get',
tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-helix ',
tgt='G@role:so-manager or G@role:so-managersearch or G@role:so-standalone or G@role:so-searchnode or G@role:so-heavynode or G@role:so-receiver or G@role:so-fleet ',
fun='network.ip_addrs',
tgt_type='compound') | dictsort()
%}

View File

@@ -2,10 +2,11 @@ elasticfleet:
enabled: False
config:
server:
custom_fqdn: ''
enable_auto_configuration: True
endpoints_enrollment: ''
es_token: ''
grid_enrollment: ''
url: ''
logging:
zeek:
excluded:

View File

@@ -7,6 +7,10 @@
{% if sls.split('.')[0] in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% from 'docker/docker.map.jinja' import DOCKER %}
{% import_yaml 'elasticfleet/defaults.yaml' as ELASTICFLEETDEFAULTS %}
{% set ELASTICFLEETMERGED = salt['pillar.get']('elasticfleet', ELASTICFLEETDEFAULTS.elasticfleet, merge=True) %}
{# This value is generated during node install and stored in minion pillar #}
{% set SERVICETOKEN = salt['pillar.get']('elasticfleet:config:server:es_token','') %}
@@ -14,6 +18,16 @@ include:
- elasticfleet.config
- elasticfleet.sostatus
{% if ELASTICFLEETMERGED.config.server.enable_auto_configuration %}
so-elastic-fleet-auto-configure-logstash-outputs:
cmd.run:
- name: /usr/sbin/so-elastic-fleet-outputs-update
#so-elastic-fleet-auto-configure-server-urls:
# cmd.run:
# - name: /usr/sbin/so-elastic-fleet-urls-update
{% endif %}
{% if SERVICETOKEN != '' %}
so-elastic-fleet:
docker_container.running:
@@ -52,8 +66,8 @@ so-elastic-fleet:
- FLEET_SERVER_SERVICE_TOKEN={{ SERVICETOKEN }}
- FLEET_SERVER_POLICY_ID=FleetServer_{{ GLOBALS.hostname }}
- FLEET_SERVER_ELASTICSEARCH_CA=/etc/pki/tls/certs/intca.crt
- FLEET_SERVER_CERT=/etc/pki/elasticfleet.crt
- FLEET_SERVER_CERT_KEY=/etc/pki/elasticfleet.key
- FLEET_SERVER_CERT=/etc/pki/elasticfleet-server.crt
- FLEET_SERVER_CERT_KEY=/etc/pki/elasticfleet-server.key
- FLEET_CA=/etc/pki/tls/certs/intca.crt
{% if DOCKER.containers['so-elastic-fleet'].extra_env %}
{% for XTRAENV in DOCKER.containers['so-elastic-fleet'].extra_env %}

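A quick way to exercise the new auto-configuration hook is to check the toggle and re-apply the state by hand. A minimal sketch, assuming a manager-class node where the elasticfleet state already applies (the pillar call may come back empty when only the shipped default from defaults.yaml is in play):

    # Check the new toggle; it defaults to True in elasticfleet/defaults.yaml
    sudo salt-call pillar.get elasticfleet:config:server:enable_auto_configuration

    # Re-apply the state; with the toggle enabled this runs the new
    # so-elastic-fleet-auto-configure-logstash-outputs cmd.run state,
    # which calls /usr/sbin/so-elastic-fleet-outputs-update
    sudo salt-call state.apply elasticfleet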
View File

@@ -11,6 +11,16 @@ elasticfleet:
helpLink: zeek.html
config:
server:
custom_fqdn:
description: Custom FQDN for Agents to connect to.
global: True
helpLink: elastic-fleet.html
advanced: True
enable_auto_configuration:
description: Enable auto-configuration of Logstash Outputs & Fleet Host URLs.
global: True
helpLink: elastic-fleet.html
advanced: True
endpoints_enrollment:
description: Endpoint enrollment key.
global: True
@@ -29,8 +39,3 @@ elasticfleet:
helpLink: elastic-fleet.html
sensitive: True
advanced: True
url:
description: Agent connection URL.
global: True
helpLink: elastic-fleet.html
advanced: True

View File

@@ -0,0 +1,75 @@
# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one
# or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use
# this file except in compliance with the Elastic License 2.0.
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set CUSTOMFQDN = salt['pillar.get']('elasticfleet:config:server:custom_fqdn') %}
function update_logstash_outputs() {
# Generate updated JSON payload
JSON_STRING=$(jq -n --argjson UPDATEDLIST "$NEW_LIST_JSON" '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":""}')
# Update Logstash Outputs
curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_logstash" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq
}
# Get current list of Logstash Outputs
RAW_JSON=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/outputs/so-manager_logstash')
# Check to make sure that the server responded with good data - else, bail from script
CHECKSUM=$(jq -r '.item.id' <<< "$RAW_JSON")
if [ "$CHECKSUM" != "so-manager_logstash" ]; then
printf "Failed to query for current Logstash Outputs..."
exit 1
fi
# Get the current list of Logstash outputs & hash them
CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON")
CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}')
# Create array & add initial elements
if [ "{{ GLOBALS.manager_ip }}" = "{{ GLOBALS.url_base }}" ]; then
NEW_LIST=("{{ GLOBALS.url_base }}:5055")
else
NEW_LIST=("{{ GLOBALS.url_base }}:5055" "{{ GLOBALS.manager_ip }}:5055")
fi
{% if CUSTOMFQDN != "" %}
# Add Custom Hostname to list
NEW_LIST+=("{{ CUSTOMFQDN }}:5055")
{% endif %}
# Query for the current Grid Nodes that are running Logstash
LOGSTASHNODES=$(salt-call --out=json pillar.get logstash:nodes | jq '.local')
# Query for Receiver Nodes & add them to the list
if grep -q "receiver" <<< $LOGSTASHNODES; then
readarray -t RECEIVERNODES < <(jq -r ' .receiver | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${RECEIVERNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
# Query for Fleet Nodes & add them to the list
if grep -q "fleet" <<< $LOGSTASHNODES; then
readarray -t FLEETNODES < <(jq -r ' .fleet | keys_unsorted[]' <<< $LOGSTASHNODES)
for NODE in "${FLEETNODES[@]}"
do
NEW_LIST+=("$NODE:5055")
done
fi
# Serialize & hash the new list of Logstash Outputs
NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}")
NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}')
# Compare the current & new list of outputs - if different, update the Logstash outputs
if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then
printf "\nHashes match - no update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
exit 0
else
printf "\nHashes don't match - update needed.\n"
printf "Current List: $CURRENT_LIST\nNew List: $NEW_LIST_JSON\n"
update_logstash_outputs
fi

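The updater above is idempotent: it only PUTs to the Fleet API when the hashed host list changes. A rough manual check, using the same curl config and endpoint the script itself uses:

    # Run the updater by hand and watch the hash comparison it prints
    sudo /usr/sbin/so-elastic-fleet-outputs-update

    # Inspect the Logstash output object it manages
    sudo curl -K /opt/so/conf/elasticsearch/curl.config \
      'http://localhost:5601/api/fleet/outputs/so-manager_logstash' | jq '.item.hosts'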
View File

@@ -20,10 +20,10 @@ JSON_STRING=$( jq -n \
curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/outputs" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
printf "\n\n"
printf "\nCreate Logstash Output if node is not an Import or Eval install\n"
printf "\nCreate Logstash Output Config if node is not an Import or Eval install\n"
{% if grains.role not in ['so-import', 'so-eval'] %}
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt)
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key)
LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-agent.crt)
LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-agent.key)
LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt)
JSON_STRING=$( jq -n \
--arg LOGSTASHCRT "$LOGSTASHCRT" \

View File

@@ -3,11 +3,11 @@
{% set HIGHLANDER = salt['pillar.get']('global:highlander', False) %}
{# ES_LOGSTASH_NODES is the same as LOGSTASH_NODES from logstash/map.jinja but heavynodes are removed #}
{# ES_LOGSTASH_NODES is the same as LOGSTASH_NODES from logstash/map.jinja but heavynodes and fleet nodes are removed #}
{% set ES_LOGSTASH_NODES = [] %}
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() | sort %}
{% if node_type != 'heavynode' %}
{% if node_type not in ['heavynode', 'fleet'] %}
{% for hostname in node_data[node_type].keys() %}
{% do ES_LOGSTASH_NODES.append({hostname:node_details[hostname].ip}) %}
{% endfor %}

View File

@@ -731,7 +731,6 @@ firewall:
- yum
- beats_5044
- beats_5644
- beats_5056
- elastic_agent_control
- elastic_agent_data
- elastic_agent_update
@@ -1159,6 +1158,9 @@ firewall:
chain:
DOCKER-USER:
hostgroups:
fleet:
portgroups:
- beats_5056
sensor:
portgroups:
- beats_5044

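With the beats_5056 portgroup moving under the fleet hostgroup in the DOCKER-USER chain, a quick sanity check after a highstate is to look for the port in the rendered rules. A sketch, assuming iptables is the active firewall backend (rule layout varies by grid design):

    # 5056 should now appear only for fleet-node sources
    sudo iptables -L DOCKER-USER -n --line-numbers | grep 5056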
View File

@@ -21,20 +21,21 @@ logstash:
- fleet
defined_pipelines:
fleet:
- so/0012_input_elastic_agent.conf
- so/9806_output_http_fleet.conf.jinja
- so/0012_input_elastic_agent.conf # Logs from agents
- so/9806_output_lumberjack_fleet.conf.jinja # Logstash to Logstash Output
manager:
- so/0011_input_endgame.conf
- so/0012_input_elastic_agent.conf
- so/0013_input_http_fleet.conf
- so/0012_input_elastic_agent.conf # Logs from agents
- so/0013_input_lumberjack_fleet.conf # Logstash to Logstash Input
- so/9999_output_redis.conf.jinja
receiver:
- so/0011_input_endgame.conf
- so/0012_input_elastic_agent.conf
- so/0012_input_elastic_agent.conf # Logs from agents
- so/0013_input_lumberjack_fleet.conf # Logstash to Logstash Input
- so/9999_output_redis.conf.jinja
search:
- so/0900_input_redis.conf.jinja
- so/9805_output_elastic_agent.conf.jinja
- so/9805_output_elastic_agent.conf.jinja # Elastic Agent data Output to ES (Final)
- so/9900_output_endgame.conf.jinja
custom0: []
custom1: []

View File

@@ -59,8 +59,10 @@ so-logstash:
- /etc/pki/filebeat.p8:/usr/share/logstash/filebeat.key:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import', 'so-eval','so-fleet', 'so-heavynode', 'so-receiver'] %}
- /opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
- /opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.p8:/usr/share/logstash/elasticfleet-logstash.key:ro
- /etc/pki/elasticfleet-logstash.crt:/usr/share/logstash/elasticfleet-logstash.crt:ro
- /etc/pki/elasticfleet-logstash.key:/usr/share/logstash/elasticfleet-logstash.key:ro
- /etc/pki/elasticfleet-lumberjack.crt:/usr/share/logstash/elasticfleet-lumberjack.crt:ro
- /etc/pki/elasticfleet-lumberjack.key:/usr/share/logstash/elasticfleet-lumberjack.key:ro
{% endif %}
{% if GLOBALS.role in ['so-manager', 'so-managersearch', 'so-standalone', 'so-import'] %}
- /etc/pki/ca.crt:/usr/share/filebeat/ca.crt:ro

View File

@@ -1,21 +0,0 @@
input {
http {
additional_codecs => { "application/json" => "json_lines" }
port => 5056
tags => [ "elastic-agent" ]
ssl => true
ssl_certificate_authorities => ["/usr/share/filebeat/ca.crt"]
ssl_certificate => "/usr/share/logstash/filebeat.crt"
ssl_key => "/usr/share/logstash/filebeat.key"
ssl_verify_mode => "peer"
ecs_compatibility => v8
}
}
filter {
if "elastic-agent" in [tags] {
mutate {
remove_field => ["http","[metadata][input]","url","user_agent"]
}
}
}

View File

@@ -0,0 +1,22 @@
input {
elastic_agent {
port => 5056
tags => [ "elastic-agent", "fleet-lumberjack-input" ]
ssl => true
ssl_certificate => "/usr/share/logstash/elasticfleet-lumberjack.crt"
ssl_key => "/usr/share/logstash/elasticfleet-lumberjack.key"
ecs_compatibility => v8
id => "fleet-lumberjack-in"
codec => "json"
}
}
filter {
if "fleet-lumberjack-input" in [tags] {
mutate {
rename => {"@metadata" => "metadata"}
}
}
}

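The elastic_agent input above replaces the old HTTP input on 5056. A rough check that the lumberjack listener is up, assuming a manager or receiver node running so-logstash:

    # Confirm something is bound on 5056 and that the Logstash container is up
    sudo ss -lntp | grep 5056
    sudo docker ps --filter name=so-logstash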
View File

@@ -1,11 +0,0 @@
output {
http {
url => 'https://{{ GLOBALS.manager }}:5056'
cacert => ["/usr/share/filebeat/ca.crt"]
http_method => post
retry_non_idempotent => true
format => json_batch
http_compression => true
ecs_compatibility => v8
}
}

View File

@@ -0,0 +1,25 @@
{% set FAILOVER_LOGSTASH_NODES = [] %}
{% set node_data = salt['pillar.get']('logstash:nodes', {GLOBALS.role.split('-')[1]: {GLOBALS.hostname: {'ip': GLOBALS.node_ip}}}) %}
{% for node_type, node_details in node_data.items() | sort %}
{% if node_type not in ['heavynode', 'fleet', 'searchnode'] %}
{% for hostname in node_data[node_type].keys() %}
{% do FAILOVER_LOGSTASH_NODES.append(node_details[hostname].ip) %}
{% endfor %}
{% endif %}
{% endfor %}
filter {
mutate {
add_tag => "fleet-lumberjack-{{ GLOBALS.hostname }}"
}
}
output {
lumberjack {
codec => json
hosts => {{ FAILOVER_LOGSTASH_NODES }}
ssl_certificate => "/usr/share/filebeat/ca.crt"
port => 5056
id => "fleet-lumberjack-{{ GLOBALS.hostname }}"
}
}

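The hosts list above is rendered from the logstash:nodes pillar with heavynode, fleet and searchnode entries filtered out, so on a fleet node it should end up pointing at the manager and any receivers. To see the raw structure the Jinja walks (the same call the outputs-update script uses):

    sudo salt-call --out=json pillar.get logstash:nodes | jq '.local'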
View File

@@ -515,7 +515,7 @@ function createFLEET() {
add_logstash_to_minion
create_fleet_policy
update_fleet_host_urls
update_logstash_outputs
#update_logstash_outputs
add_telegraf_to_minion
add_nginx_to_minion
}

View File

@@ -7,6 +7,7 @@
{% if sls in allowed_states %}
{% from 'vars/globals.map.jinja' import GLOBALS %}
{% set CUSTOMFQDN = salt['pillar.get']('elasticfleet:config:server:custom_fqdn') %}
{% set global_ca_text = [] %}
{% set global_ca_server = [] %}
@@ -129,15 +130,16 @@ rediskeyperms:
{% endif %}
{% if grains['role'] in ['so-manager', 'so-eval', 'so-managersearch', 'so-standalone', 'so-import', 'so-heavynode', 'so-fleet', 'so-receiver'] %}
# Create cert for Elastic Fleet Host
{% if grains['role'] not in [ 'so-heavynode', 'so-receiver'] %}
# Start -- Elastic Fleet Host Cert
etc_elasticfleet_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet.key
- name: /etc/pki/elasticfleet-server.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet.key') -%}
{% if salt['file.file_exists']('/etc/pki/elasticfleet-server.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
@@ -145,15 +147,14 @@ etc_elasticfleet_key:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
etc_elasticfleet_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet.crt
- name: /etc/pki/elasticfleet-server.crt
- ca_server: {{ ca_server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- private_key: /etc/pki/elasticfleet-server.key
- CN: {{ GLOBALS.url_base }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }} {% if CUSTOMFQDN != "" %},DNS:{{ CUSTOMFQDN }}{% endif %}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -161,22 +162,18 @@ etc_elasticfleet_crt:
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet.key -topk8 -out /etc/pki/elasticfleet.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_key
efperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet.key
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- group: 939
chownelasticfleetcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet.crt
- name: /etc/pki/elasticfleet-server.crt
- mode: 640
- user: 947
- group: 939
@@ -184,29 +181,16 @@ chownelasticfleetcrt:
chownelasticfleetkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet.key
- name: /etc/pki/elasticfleet-server.key
- mode: 640
- user: 947
- group: 939
# End -- Elastic Fleet Host Cert
{% endif %} # endif is for not including HeavyNodes & Receivers
# Create Symlinks to the keys to distribute it to all the things
elasticfleetdircerts:
file.directory:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs
- makedirs: True
efcrtlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs/elasticfleet.crt
- target: /etc/pki/elasticfleet.crt
- user: socore
- group: socore
{% if grains.role not in ['so-fleet'] %}
# Create Cert for Elastic Fleet Logstash Input (Same cert used across all Fleet nodes)
etc_elasticfleetlogstash_key:
{% if grains['role'] not in [ 'so-heavynode'] %}
# Start -- Elastic Fleet Logstash Input Cert
etc_elasticfleet_logstash_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-logstash.key
- keysize: 4096
@@ -220,15 +204,14 @@ etc_elasticfleetlogstash_key:
attempts: 5
interval: 30
# Request a cert and drop it where it needs to go to be distributed
etc_elasticfleetlogstash_crt:
etc_elasticfleet_logstash_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-logstash.crt
- ca_server: {{ ca_server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-logstash.key
- CN: {{ GLOBALS.hostname }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}, IP:{{ GLOBALS.node_ip }}
- CN: {{ GLOBALS.url_base }}
- subjectAltName: DNS:{{ GLOBALS.hostname }},IP:{{ GLOBALS.node_ip }} {% if CUSTOMFQDN != "" %},DNS:{{ CUSTOMFQDN }}{% endif %}
- days_remaining: 0
- days_valid: 820
- backup: True
@@ -239,7 +222,7 @@ etc_elasticfleetlogstash_crt:
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-logstash.key -topk8 -out /etc/pki/elasticfleet-logstash.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_key
- x509: etc_elasticfleet_logstash_key
eflogstashperms:
file.managed:
@@ -248,63 +231,150 @@ eflogstashperms:
- mode: 640
- group: 939
chownilogstashelasticfleetp8:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.p8
- mode: 640
- user: 947
- group: 939
chownilogstashelasticfleetlogstashcrt:
chownelasticfleetlogstashcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.crt
- mode: 640
- user: 947
- user: 931
- group: 939
chownilogstashelasticfleetlogstashkey:
chownelasticfleetlogstashkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-logstash.key
- mode: 640
- user: 931
- group: 939
# End -- Elastic Fleet Logstash Input Cert
{% endif %} # endif is for not including HeavyNodes
# Start -- Elastic Fleet Node - Logstash Lumberjack Input / Output
# Cert needed on: Managers, Receivers
etc_elasticfleetlumberjack_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-lumberjack.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-lumberjack.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleetlumberjack_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-lumberjack.crt
- ca_server: {{ ca_server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-lumberjack.key
- CN: {{ GLOBALS.node_ip }}
- subjectAltName: DNS:{{ GLOBALS.hostname }}
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-lumberjack.key -topk8 -out /etc/pki/elasticfleet-lumberjack.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleetlumberjack_key
eflogstashlumberjackperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.key
- mode: 640
- group: 939
chownilogstashelasticfleetlumberjackp8:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.p8
- mode: 640
- user: 931
- group: 939
chownilogstashelasticfleetlogstashlumberjackcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.crt
- mode: 640
- user: 931
- group: 939
chownilogstashelasticfleetlogstashlumberjackkey:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-lumberjack.key
- mode: 640
- user: 931
- group: 939
# End -- Elastic Fleet Node - Logstash Lumberjack Input / Output
# Start -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
etc_elasticfleet_agent_key:
x509.private_key_managed:
- name: /etc/pki/elasticfleet-agent.key
- keysize: 4096
- backup: True
- new: True
{% if salt['file.file_exists']('/etc/pki/elasticfleet-agent.key') -%}
- prereq:
- x509: etc_elasticfleet_crt
{%- endif %}
- retry:
attempts: 5
interval: 30
etc_elasticfleet_agent_crt:
x509.certificate_managed:
- name: /etc/pki/elasticfleet-agent.crt
- ca_server: {{ ca_server }}
- signing_policy: elasticfleet
- private_key: /etc/pki/elasticfleet-agent.key
- CN: {{ GLOBALS.hostname }}
- days_remaining: 0
- days_valid: 820
- backup: True
- timeout: 30
- retry:
attempts: 5
interval: 30
cmd.run:
- name: "/usr/bin/openssl pkcs8 -in /etc/pki/elasticfleet-agent.key -topk8 -out /etc/pki/elasticfleet-agent.p8 -nocrypt"
- onchanges:
- x509: etc_elasticfleet_agent_key
efagentperms:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- group: 939
chownelasticfleetagentcrt:
file.managed:
- replace: False
- name: /etc/pki/elasticfleet-agent.crt
- mode: 640
- user: 947
- group: 939
eflogstashkeylink:
file.symlink:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs/elasticfleet-logstash.p8
- target: /etc/pki/elasticfleet.p8
- user: socore
- group: socore
eflogstashcrtlink:
file.symlink:
- name: /opt/so/saltstack/local/salt/elasticfleet/files/certs/elasticfleet-logstash.crt
- target: /etc/pki/elasticfleet.crt
- user: socore
- group: socore
{% endif %}
/opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.p8:
chownelasticfleetagentkey:
file.managed:
- replace: True
- source: salt://elasticfleet/files/certs/elasticfleet-logstash.p8
- makedirs: True
- replace: False
- name: /etc/pki/elasticfleet-agent.key
- mode: 640
- user: 931
- group: 939
/opt/so/conf/elastic-fleet/certs/elasticfleet-logstash.crt:
file.managed:
- replace: True
- source: salt://elasticfleet/files/certs/elasticfleet-logstash.crt
- makedirs: True
- mode: 640
- user: 931
- user: 947
- group: 939
# End -- Elastic Fleet Client Cert for Agent (Mutual Auth with Logstash Output)
{% endif %}
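After a highstate, the renamed and newly issued certs can be spot-checked with openssl. A minimal sanity check, assuming the CA has already signed them:

    # Server cert: CN should be the URL base; SAN should carry hostname, node IP and any custom FQDN
    sudo openssl x509 -in /etc/pki/elasticfleet-server.crt -noout -subject -ext subjectAltName

    # Agent client cert used for mutual auth with the Logstash output
    sudo openssl x509 -in /etc/pki/elasticfleet-agent.crt -noout -subject -dates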