diff --git a/salt/elasticfleet/config.map.jinja b/salt/elasticfleet/config.map.jinja new file mode 100644 index 000000000..b95a3e895 --- /dev/null +++ b/salt/elasticfleet/config.map.jinja @@ -0,0 +1,34 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at https://securityonion.net/license; you may not use this file except in compliance with the Elastic License 2.0. #} + +{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} + +{# advanced config_yaml options for elasticfleet logstash output #} +{% set ADV_OUTPUT_LOGSTASH_RAW = ELASTICFLEETMERGED.config.outputs.logstash %} +{% set ADV_OUTPUT_LOGSTASH = {} %} +{% for k, v in ADV_OUTPUT_LOGSTASH_RAW.items() %} +{% if v != "" and v is not none %} +{% if k == 'queue_mem_events' %} +{# rename queue_mem_events to queue.mem.events #} +{% do ADV_OUTPUT_LOGSTASH.update({'queue.mem.events':v}) %} +{% elif k == 'loadbalance' %} +{% if v %} +{# only include loadbalance config when it's True #} +{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %} +{% endif %} +{% else %} +{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %} +{% endif %} +{% endif %} +{% endfor %} + +{% set LOGSTASH_CONFIG_YAML_RAW = [] %} +{% if ADV_OUTPUT_LOGSTASH %} +{% for k, v in ADV_OUTPUT_LOGSTASH.items() %} +{% do LOGSTASH_CONFIG_YAML_RAW.append(k ~ ': ' ~ v) %} +{% endfor %} +{% endif %} + +{% set LOGSTASH_CONFIG_YAML = LOGSTASH_CONFIG_YAML_RAW | join('\\n') if LOGSTASH_CONFIG_YAML_RAW else '' %} diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 0f013e320..a3132d3f4 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -10,6 +10,14 @@ elasticfleet: grid_enrollment: '' defend_filters: enable_auto_configuration: False + outputs: + logstash: + bulk_max_size: '' + worker: '' + queue_mem_events: '' + timeout: '' + loadbalance: False + compression_level: '' subscription_integrations: False auto_upgrade_integrations: False logging: diff --git a/salt/elasticfleet/enabled.sls b/salt/elasticfleet/enabled.sls index ec8c8337e..db10a7182 100644 --- a/salt/elasticfleet/enabled.sls +++ b/salt/elasticfleet/enabled.sls @@ -36,12 +36,13 @@ so-elastic-fleet-auto-configure-logstash-outputs: {# Separate from above in order to catch elasticfleet-logstash.crt changes and force update to fleet output policy #} so-elastic-fleet-auto-configure-logstash-outputs-force: cmd.run: - - name: /usr/sbin/so-elastic-fleet-outputs-update --force --certs + - name: /usr/sbin/so-elastic-fleet-outputs-update --certs - retry: attempts: 4 interval: 30 - onchanges: - x509: etc_elasticfleet_logstash_crt + - x509: elasticfleet_kafka_crt {% endif %} # If enabled, automatically update Fleet Server URLs & ES Connection diff --git a/salt/elasticfleet/integration-defaults.map.jinja b/salt/elasticfleet/integration-defaults.map.jinja index 500a9e63c..69ce7f3af 100644 --- a/salt/elasticfleet/integration-defaults.map.jinja +++ b/salt/elasticfleet/integration-defaults.map.jinja @@ -121,6 +121,9 @@ "phases": { "cold": { "actions": { + "allocate":{ + "number_of_replicas": "" + }, "set_priority": {"priority": 0} }, "min_age": "60d" @@ -137,12 +140,31 @@ "max_age": "30d", "max_primary_shard_size": "50gb" }, + "forcemerge":{ + "max_num_segments": "" + }, + "shrink":{ + "max_primary_shard_size": "", + "method": "COUNT", + "number_of_shards": "" + }, "set_priority": {"priority": 100} }, "min_age": "0ms" }, "warm": { "actions": { + "allocate": { 
+ "number_of_replicas": "" + }, + "forcemerge": { + "max_num_segments": "" + }, + "shrink":{ + "max_primary_shard_size": "", + "method": "COUNT", + "number_of_shards": "" + }, "set_priority": {"priority": 50} }, "min_age": "30d" diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index 450e044e6..d78189f96 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -50,6 +50,46 @@ elasticfleet: global: True forcedType: bool helpLink: elastic-fleet.html + outputs: + logstash: + bulk_max_size: + description: The maximum number of events to bulk in a single Logstash request. + global: True + forcedType: int + advanced: True + helpLink: elastic-fleet.html + worker: + description: The number of workers per configured host publishing events. + global: True + forcedType: int + advanced: true + helpLink: elastic-fleet.html + queue_mem_events: + title: queued events + description: The number of events the queue can store. This value should be evenly divisible by the smaller of 'bulk_max_size' to avoid sending partial batches to the output. + global: True + forcedType: int + advanced: True + helpLink: elastic-fleet.html + timeout: + description: The number of seconds to wait for responses from the Logstash server before timing out. Eg 30s + regex: ^[0-9]+s$ + advanced: True + global: True + helpLink: elastic-fleet.html + loadbalance: + description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive. + forcedType: bool + advanced: True + global: True + helpLink: elastic-fleet.html + compression_level: + description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression). + regex: ^[1-9]$ + forcedType: int + advanced: True + global: True + helpLink: elastic-fleet.html server: custom_fqdn: description: Custom FQDN for Agents to connect to. One per line. diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 4fa68298c..58baadca5 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -3,13 +3,16 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # this file except in compliance with the Elastic License 2.0. -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} +{%- from 'vars/globals.map.jinja' import GLOBALS %} +{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} +{%- from 'elasticfleet/config.map.jinja' import LOGSTASH_CONFIG_YAML %} . 
/usr/sbin/so-common FORCE_UPDATE=false UPDATE_CERTS=false +LOGSTASH_PILLAR_CONFIG_YAML="{{ LOGSTASH_CONFIG_YAML }}" +LOGSTASH_PILLAR_STATE_FILE="/opt/so/state/esfleet_logstash_config_pillar" while [[ $# -gt 0 ]]; do case $1 in @@ -19,6 +22,7 @@ while [[ $# -gt 0 ]]; do ;; -c| --certs) UPDATE_CERTS=true + FORCE_UPDATE=true shift ;; *) @@ -41,38 +45,45 @@ function update_logstash_outputs() { LOGSTASHKEY=$(openssl rsa -in /etc/pki/elasticfleet-logstash.key) LOGSTASHCRT=$(openssl x509 -in /etc/pki/elasticfleet-logstash.crt) LOGSTASHCA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) + # Revert escaped \\n to \n for jq + LOGSTASH_PILLAR_CONFIG_YAML=$(printf '%b' "$LOGSTASH_PILLAR_CONFIG_YAML") + if SECRETS=$(echo "$logstash_policy" | jq -er '.item.secrets' 2>/dev/null); then if [[ "$UPDATE_CERTS" != "true" ]]; then # Reuse existing secret JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \ --argjson SECRETS "$SECRETS" \ --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG,"secrets": $SECRETS}') else # Update certs, creating new secret JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \ --arg LOGSTASHKEY "$LOGSTASHKEY" \ --arg LOGSTASHCRT "$LOGSTASHCRT" \ --arg LOGSTASHCA "$LOGSTASHCA" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"certificate_authorities":[ $LOGSTASHCA ]},"secrets": {"ssl":{"key": $LOGSTASHKEY }}}') fi else if [[ "$UPDATE_CERTS" != "true" ]]; then # Reuse existing ssl config JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \ --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": $SSL_CONFIG}') else # Update ssl config JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg CONFIG_YAML "$LOGSTASH_PILLAR_CONFIG_YAML" \ --arg LOGSTASHKEY "$LOGSTASHKEY" \ --arg LOGSTASHCRT "$LOGSTASHCRT" \ --arg LOGSTASHCA "$LOGSTASHCA" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":$CONFIG_YAML,"ssl": {"certificate": $LOGSTASHCRT,"key": $LOGSTASHKEY,"certificate_authorities":[ $LOGSTASHCA ]}}') fi fi fi @@ -84,19 +95,42 @@ function update_kafka_outputs() { # Make sure SSL configuration is included in policy updates for Kafka 
output. SSL is configured in so-elastic-fleet-setup if kafka_policy=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "http://localhost:5601/api/fleet/outputs/so-manager_kafka" --fail 2>/dev/null); then SSL_CONFIG=$(echo "$kafka_policy" | jq -r '.item.ssl') + KAFKAKEY=$(openssl rsa -in /etc/pki/elasticfleet-kafka.key) + KAFKACRT=$(openssl x509 -in /etc/pki/elasticfleet-kafka.crt) + KAFKACA=$(openssl x509 -in /etc/pki/tls/certs/intca.crt) if SECRETS=$(echo "$kafka_policy" | jq -er '.item.secrets' 2>/dev/null); then - # Update policy when fleet has secrets enabled - JSON_STRING=$(jq -n \ - --arg UPDATEDLIST "$NEW_LIST_JSON" \ - --argjson SSL_CONFIG "$SSL_CONFIG" \ - --argjson SECRETS "$SECRETS" \ - '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + if [[ "$UPDATE_CERTS" != "true" ]]; then + # Update policy when fleet has secrets enabled + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --argjson SSL_CONFIG "$SSL_CONFIG" \ + --argjson SECRETS "$SECRETS" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + else + # Update certs, creating new secret + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg KAFKAKEY "$KAFKAKEY" \ + --arg KAFKACRT "$KAFKACRT" \ + --arg KAFKACA "$KAFKACA" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": {"certificate_authorities":[ $KAFKACA ],"certificate": $KAFKACRT ,"key":"","verification_mode":"full"},"secrets": {"ssl":{"key": $KAFKAKEY }}}') + fi else - # Update policy when fleet has secrets disabled or policy hasn't been force updated - JSON_STRING=$(jq -n \ - --arg UPDATEDLIST "$NEW_LIST_JSON" \ - --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}') + if [[ "$UPDATE_CERTS" != "true" ]]; then + # Update policy when fleet has secrets disabled or policy hasn't been force updated + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --argjson SSL_CONFIG "$SSL_CONFIG" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": $SSL_CONFIG}') + else + # Update ssl config + JSON_STRING=$(jq -n \ + --arg UPDATEDLIST "$NEW_LIST_JSON" \ + --arg KAFKAKEY "$KAFKAKEY" \ + --arg KAFKACRT "$KAFKACRT" \ + --arg KAFKACA "$KAFKACA" \ + '{"name": "grid-kafka","type": "kafka","hosts": $UPDATEDLIST,"is_default": true,"is_default_monitoring": true,"config_yaml": "","ssl": { "certificate_authorities": [ $KAFKACA ], "certificate": $KAFKACRT, "key": $KAFKAKEY, "verification_mode": "full" }}') + fi fi # Update Kafka outputs curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/outputs/so-manager_kafka" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING" | jq @@ -119,7 +153,7 @@ function update_kafka_outputs() { # Get the current list of kafka outputs & hash them CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") - CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') declare -a NEW_LIST=() @@ -142,10 +176,19 @@ function update_kafka_outputs() { printf "Failed to query for 
current Logstash Outputs..." exit 1 fi + # logstash adv config - compare pillar to last state file value + if [[ -f "$LOGSTASH_PILLAR_STATE_FILE" ]]; then + PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML=$(cat "$LOGSTASH_PILLAR_STATE_FILE") + if [[ "$LOGSTASH_PILLAR_CONFIG_YAML" != "$PREVIOUS_LOGSTASH_PILLAR_CONFIG_YAML" ]]; then + echo "Logstash pillar config has changed - forcing update" + FORCE_UPDATE=true + fi + echo "$LOGSTASH_PILLAR_CONFIG_YAML" > "$LOGSTASH_PILLAR_STATE_FILE" + fi # Get the current list of Logstash outputs & hash them CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") - CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') declare -a NEW_LIST=() @@ -194,7 +237,7 @@ function update_kafka_outputs() { # Sort & hash the new list of Logstash Outputs NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") -NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') +NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}') # Compare the current & new list of outputs - if different, update the Logstash outputs if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then diff --git a/salt/elasticsearch/defaults.yaml b/salt/elasticsearch/defaults.yaml index 592f47a2b..5cfb9a0e0 100644 --- a/salt/elasticsearch/defaults.yaml +++ b/salt/elasticsearch/defaults.yaml @@ -72,6 +72,8 @@ elasticsearch: actions: set_priority: priority: 0 + allocate: + number_of_replicas: "" min_age: 60d delete: actions: @@ -84,11 +86,25 @@ elasticsearch: max_primary_shard_size: 50gb set_priority: priority: 100 + forcemerge: + max_num_segments: "" + shrink: + max_primary_shard_size: "" + method: COUNT + number_of_shards: "" min_age: 0ms warm: actions: set_priority: priority: 50 + forcemerge: + max_num_segments: "" + shrink: + max_primary_shard_size: "" + method: COUNT + number_of_shards: "" + allocate: + number_of_replicas: "" min_age: 30d so-case: index_sorting: false @@ -245,7 +261,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-detection: index_sorting: false index_template: @@ -584,7 +599,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-import: index_sorting: false index_template: @@ -932,7 +946,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-hydra: close: 30 delete: 365 @@ -1043,7 +1056,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-lists: index_sorting: false index_template: @@ -1127,6 +1139,8 @@ elasticsearch: actions: set_priority: priority: 0 + allocate: + number_of_replicas: "" min_age: 60d delete: actions: @@ -1139,11 +1153,25 @@ elasticsearch: max_primary_shard_size: 50gb set_priority: priority: 100 + forcemerge: + max_num_segments: "" + shrink: + max_primary_shard_size: "" + method: COUNT + number_of_shards: "" min_age: 0ms warm: actions: set_priority: priority: 50 + allocate: + number_of_replicas: "" + forcemerge: + max_num_segments: "" + shrink: + max_primary_shard_size: "" + method: COUNT + number_of_shards: "" min_age: 30d so-logs-detections_x_alerts: index_sorting: false @@ -3123,7 +3151,6 @@ elasticsearch: set_priority: priority: 50 min_age: 30d - warm: 7 so-logs-system_x_application: index_sorting: false index_template: diff --git a/salt/elasticsearch/soc_elasticsearch.yaml b/salt/elasticsearch/soc_elasticsearch.yaml index 097a53296..7fd4f8329 100644 --- a/salt/elasticsearch/soc_elasticsearch.yaml +++ b/salt/elasticsearch/soc_elasticsearch.yaml @@ -131,6 
+131,47 @@ elasticsearch: description: Maximum primary shard size. Once an index reaches this limit, it will be rolled over into a new index. global: True helpLink: elasticsearch.html + shrink: + method: + description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size. + options: + - COUNT + - SIZE + global: True + advanced: True + forcedType: string + number_of_shards: + title: shard count + description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'. + global: True + forcedType: int + advanced: True + max_primary_shard_size: + title: max shard size + description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. + regex: ^[0-9]+(?:gb|tb|pb)$ + global: True + forcedType: string + advanced: True + allow_write_after_shrink: + description: Allow writes after shrink. + global: True + forcedType: bool + default: False + advanced: True + forcemerge: + max_num_segments: + description: Reduce the number of segments in each index shard and clean up deleted documents. + global: True + forcedType: int + advanced: True + index_codec: + title: compression + description: Use higher compression for stored fields at the cost of slower performance. + forcedType: bool + global: True + default: False + advanced: True cold: min_age: description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier. @@ -144,6 +185,12 @@ elasticsearch: description: Used for index recovery after a node restart. Indices with higher priorities are recovered before indices with lower priorities. global: True helpLink: elasticsearch.html + allocate: + number_of_replicas: + description: Set the number of replicas. Remains the same as the previous phase by default. + forcedType: int + global: True + advanced: True warm: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier. @@ -158,6 +205,52 @@ elasticsearch: forcedType: int global: True helpLink: elasticsearch.html + shrink: + method: + description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size. + options: + - COUNT + - SIZE + global: True + advanced: True + number_of_shards: + title: shard count + description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'. + global: True + forcedType: int + advanced: True + max_primary_shard_size: + title: max shard size + description: Desired shard size in gb/tb/pb eg. 100gb. 
Note that this value is only used when the shrink method selected is 'SIZE'. + regex: ^[0-9]+(?:gb|tb|pb)$ + global: True + forcedType: string + advanced: True + allow_write_after_shrink: + description: Allow writes after shrink. + global: True + forcedType: bool + default: False + advanced: True + forcemerge: + max_num_segments: + description: Reduce the number of segments in each index shard and clean up deleted documents. + global: True + forcedType: int + advanced: True + index_codec: + title: compression + description: Use higher compression for stored fields at the cost of slower performance. + forcedType: bool + global: True + default: False + advanced: True + allocate: + number_of_replicas: + description: Set the number of replicas. Remains the same as the previous phase by default. + forcedType: int + global: True + advanced: True delete: min_age: description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion. @@ -287,6 +380,47 @@ elasticsearch: global: True advanced: True helpLink: elasticsearch.html + shrink: + method: + description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size. + options: + - COUNT + - SIZE + global: True + advanced: True + forcedType: string + number_of_shards: + title: shard count + description: Desired shard count. Note that this value is only used when the shrink method selected is 'COUNT'. + global: True + forcedType: int + advanced: True + max_primary_shard_size: + title: max shard size + description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. + regex: ^[0-9]+(?:gb|tb|pb)$ + global: True + forcedType: string + advanced: True + allow_write_after_shrink: + description: Allow writes after shrink. + global: True + forcedType: bool + default: False + advanced: True + forcemerge: + max_num_segments: + description: Reduce the number of segments in each index shard and clean up deleted documents. + global: True + forcedType: int + advanced: True + index_codec: + title: compression + description: Use higher compression for stored fields at the cost of slower performance. + forcedType: bool + global: True + default: False + advanced: True warm: min_age: description: Minimum age of index. ex. 30d - This determines when the index should be moved to the warm tier. Nodes in the warm tier generally don’t need to be as fast as those in the hot tier. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and warm min_age set to 30 then there will be 30 days from index creation to rollover and then an additional 30 days before moving to warm tier. @@ -314,6 +448,52 @@ elasticsearch: global: True advanced: True helpLink: elasticsearch.html + shrink: + method: + description: Shrink the index to a new index with fewer primary shards. Shrink operation is by count or size. + options: + - COUNT + - SIZE + global: True + advanced: True + number_of_shards: + title: shard count + description: Desired shard count. 
Note that this value is only used when the shrink method selected is 'COUNT'. + global: True + forcedType: int + advanced: True + max_primary_shard_size: + title: max shard size + description: Desired shard size in gb/tb/pb eg. 100gb. Note that this value is only used when the shrink method selected is 'SIZE'. + regex: ^[0-9]+(?:gb|tb|pb)$ + global: True + forcedType: string + advanced: True + allow_write_after_shrink: + description: Allow writes after shrink. + global: True + forcedType: bool + default: False + advanced: True + forcemerge: + max_num_segments: + description: Reduce the number of segments in each index shard and clean up deleted documents. + global: True + forcedType: int + advanced: True + index_codec: + title: compression + description: Use higher compression for stored fields at the cost of slower performance. + forcedType: bool + global: True + default: False + advanced: True + allocate: + number_of_replicas: + description: Set the number of replicas. Remains the same as the previous phase by default. + forcedType: int + global: True + advanced: True cold: min_age: description: Minimum age of index. ex. 60d - This determines when the index should be moved to the cold tier. While still searchable, this tier is typically optimized for lower storage costs rather than search speed. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and cold min_age set to 60 then there will be 30 days from index creation to rollover and then an additional 60 days before moving to cold tier. @@ -330,6 +510,12 @@ elasticsearch: global: True advanced: True helpLink: elasticsearch.html + allocate: + number_of_replicas: + description: Set the number of replicas. Remains the same as the previous phase by default. + forcedType: int + global: True + advanced: True delete: min_age: description: Minimum age of index. ex. 90d - This determines when the index should be deleted. It’s important to note that this is calculated relative to the rollover date (NOT the original creation date of the index). For example, if you have an index that is set to rollover after 30 days and delete min_age set to 90 then there will be 30 days from index creation to rollover and then an additional 90 days before deletion. 
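A minimal sketch of how the warm-phase ILM options above fit together, assuming hypothetical pillar values under an index's policy.phases.warm (paths abbreviated; values are examples only):

    # hypothetical example values
    warm:
      min_age: 30d
      actions:
        set_priority:
          priority: 50
        allocate:
          number_of_replicas: 1
        shrink:
          method: COUNT
          number_of_shards: 1
          max_primary_shard_size: ""
        forcemerge:
          max_num_segments: 1
          index_codec: True

With these values, the template.map.jinja changes below would keep allocate (number_of_replicas is set), drop max_primary_shard_size because the shrink method is COUNT, strip the SOC-only method key, and rewrite index_codec to best_compression; actions whose required values are left empty are removed from the rendered ILM policy entirely.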
diff --git a/salt/elasticsearch/template.map.jinja b/salt/elasticsearch/template.map.jinja index 414d8a6b4..2563f8e23 100644 --- a/salt/elasticsearch/template.map.jinja +++ b/salt/elasticsearch/template.map.jinja @@ -61,5 +61,55 @@ {% do settings.index_template.template.settings.index.pop('sort') %} {% endif %} {% endif %} + +{# advanced ilm actions #} +{% if settings.policy is defined and settings.policy.phases is defined %} +{% set PHASE_NAMES = ["hot", "warm", "cold"] %} +{% for P in PHASE_NAMES %} +{% if settings.policy.phases[P] is defined and settings.policy.phases[P].actions is defined %} +{% set PHASE = settings.policy.phases[P].actions %} +{# remove allocate action if number_of_replicas isn't configured #} +{% if PHASE.allocate is defined %} +{% if PHASE.allocate.number_of_replicas is not defined or PHASE.allocate.number_of_replicas == "" %} +{% do PHASE.pop('allocate', none) %} +{% endif %} +{% endif %} +{# start shrink action #} +{% if PHASE.shrink is defined %} +{% if PHASE.shrink.method is defined %} +{% if PHASE.shrink.method == 'COUNT' and PHASE.shrink.number_of_shards is defined and PHASE.shrink.number_of_shards %} +{# remove max_primary_shard_size value when doing shrink operation by count vs size #} +{% do PHASE.shrink.pop('max_primary_shard_size', none) %} +{% elif PHASE.shrink.method == 'SIZE' and PHASE.shrink.max_primary_shard_size is defined and PHASE.shrink.max_primary_shard_size %} +{# remove number_of_shards value when doing shrink operation by size vs count #} +{% do PHASE.shrink.pop('number_of_shards', none) %} +{% else %} +{# method isn't defined or missing a required config number_of_shards/max_primary_shard_size #} +{% do PHASE.pop('shrink', none) %} +{% endif %} +{% endif %} +{% endif %} +{# always remove shrink method since its only used for SOC config, not in the actual ilm policy #} +{% if PHASE.shrink is defined %} +{% do PHASE.shrink.pop('method', none) %} +{% endif %} +{# end shrink action #} +{# start force merge #} +{% if PHASE.forcemerge is defined %} +{% if PHASE.forcemerge.index_codec is defined and PHASE.forcemerge.index_codec %} +{% do PHASE.forcemerge.update({'index_codec': 'best_compression'}) %} +{% else %} +{% do PHASE.forcemerge.pop('index_codec', none) %} +{% endif %} +{% if PHASE.forcemerge.max_num_segments is not defined or not PHASE.forcemerge.max_num_segments %} +{# max_num_segments is empty, drop it #} +{% do PHASE.pop('forcemerge', none) %} +{% endif %} +{% endif %} +{# end force merge #} +{% endif %} +{% endfor %} +{% endif %} + {% do ES_INDEX_SETTINGS.update({index | replace("_x_", "."): ES_INDEX_SETTINGS_GLOBAL_OVERRIDES[index]}) %} {% endfor %} diff --git a/salt/manager/managed_soc_annotations.sls b/salt/manager/managed_soc_annotations.sls index d8f175df6..4357b53a2 100644 --- a/salt/manager/managed_soc_annotations.sls +++ b/salt/manager/managed_soc_annotations.sls @@ -25,13 +25,11 @@ {% set index_settings = es.get('index_settings', {}) %} {% set input = index_settings.get('so-logs', {}) %} {% for k in matched_integration_names %} - {% if k not in index_settings %} - {% set _ = index_settings.update({k: input}) %} - {% endif %} + {% do index_settings.update({k: input}) %} {% endfor %} {% for k in addon_integration_keys %} {% if k not in matched_integration_names and k in index_settings %} - {% set _ = index_settings.pop(k) %} + {% do index_settings.pop(k) %} {% endif %} {% endfor %} {{ data }} @@ -45,14 +43,12 @@ {% set es = data.get('elasticsearch', {}) %} {% set index_settings = es.get('index_settings', {}) %} {% for k in 
matched_integration_names %} - {% if k not in index_settings %} - {% set input = ADDON_INTEGRATION_DEFAULTS[k] %} - {% set _ = index_settings.update({k: input})%} - {% endif %} + {% set input = ADDON_INTEGRATION_DEFAULTS[k] %} + {% do index_settings.update({k: input})%} {% endfor %} {% for k in addon_integration_keys %} {% if k not in matched_integration_names and k in index_settings %} - {% set _ = index_settings.pop(k) %} + {% do index_settings.pop(k) %} {% endif %} {% endfor %} {{ data }} diff --git a/salt/manager/tools/sbin/soup b/salt/manager/tools/sbin/soup index 32553b5c3..885f9b521 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -426,6 +426,7 @@ preupgrade_changes() { [[ "$INSTALLEDVERSION" == 2.4.160 ]] && up_to_2.4.170 [[ "$INSTALLEDVERSION" == 2.4.170 ]] && up_to_2.4.180 [[ "$INSTALLEDVERSION" == 2.4.180 ]] && up_to_2.4.190 + [[ "$INSTALLEDVERSION" == 2.4.190 ]] && up_to_2.4.200 true } @@ -457,6 +458,7 @@ postupgrade_changes() { [[ "$POSTVERSION" == 2.4.160 ]] && post_to_2.4.170 [[ "$POSTVERSION" == 2.4.170 ]] && post_to_2.4.180 [[ "$POSTVERSION" == 2.4.180 ]] && post_to_2.4.190 + [[ "$POSTVERSION" == 2.4.190 ]] && post_to_2.4.200 true } @@ -636,6 +638,11 @@ post_to_2.4.190() { POSTVERSION=2.4.190 } +post_to_2.4.200() { + echo "Nothing to apply" + POSTVERSION=2.4.200 +} + repo_sync() { echo "Sync the local repo." su socore -c '/usr/sbin/so-repo-sync' || fail "Unable to complete so-repo-sync." @@ -903,6 +910,12 @@ up_to_2.4.190() { INSTALLEDVERSION=2.4.190 } +up_to_2.4.200() { + touch /opt/so/state/esfleet_logstash_config_pillar + + INSTALLEDVERSION=2.4.200 +} + add_hydra_pillars() { mkdir -p /opt/so/saltstack/local/pillar/hydra touch /opt/so/saltstack/local/pillar/hydra/soc_hydra.sls
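A minimal end-to-end sketch of the new Logstash output options, assuming hypothetical pillar values nested the way config.map.jinja reads them (ELASTICFLEETMERGED.config.outputs.logstash):

    # hypothetical example values
    elasticfleet:
      config:
        outputs:
          logstash:
            bulk_max_size: 4096
            worker: 2
            queue_mem_events: 8192
            timeout: 60s
            loadbalance: True
            compression_level: 3

config.map.jinja renders these into LOGSTASH_CONFIG_YAML, dropping empty values, renaming queue_mem_events to queue.mem.events, including loadbalance only when it is True, and joining the entries with escaped \n separators. so-elastic-fleet-outputs-update converts the escaped newlines back with printf '%b', writes the result into the grid-logstash output's config_yaml via the Fleet API, and records the rendered string in /opt/so/state/esfleet_logstash_config_pillar (created by soup in up_to_2.4.200) so that a later pillar change forces an output update even when the Logstash host list hash is unchanged.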