diff --git a/salt/elasticfleet/config.map.jinja b/salt/elasticfleet/config.map.jinja new file mode 100644 index 000000000..b95a3e895 --- /dev/null +++ b/salt/elasticfleet/config.map.jinja @@ -0,0 +1,34 @@ +{# Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one + or more contributor license agreements. Licensed under the Elastic License 2.0 as shown at + https://securityonion.net/license; you may not use this file except in compliance with the + Elastic License 2.0. #} + +{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} + +{# advanced config_yaml options for elasticfleet logstash output #} +{% set ADV_OUTPUT_LOGSTASH_RAW = ELASTICFLEETMERGED.config.outputs.logstash %} +{% set ADV_OUTPUT_LOGSTASH = {} %} +{% for k, v in ADV_OUTPUT_LOGSTASH_RAW.items() %} +{% if v != "" and v is not none %} +{% if k == 'queue_mem_events' %} +{# rename queue_mem_events queue.mem.events #} +{% do ADV_OUTPUT_LOGSTASH.update({'queue.mem.events':v}) %} +{% elif k == 'loadbalance' %} +{% if v %} +{# only include loadbalance config when its True #} +{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %} +{% endif %} +{% else %} +{% do ADV_OUTPUT_LOGSTASH.update({k:v}) %} +{% endif %} +{% endif %} +{% endfor %} + +{% set LOGSTASH_CONFIG_YAML_RAW = [] %} +{% if ADV_OUTPUT_LOGSTASH %} +{% for k, v in ADV_OUTPUT_LOGSTASH.items() %} +{% do LOGSTASH_CONFIG_YAML_RAW.append(k ~ ': ' ~ v) %} +{% endfor %} +{% endif %} + +{% set LOGSTASH_CONFIG_YAML = LOGSTASH_CONFIG_YAML_RAW | join('\\n') if LOGSTASH_CONFIG_YAML_RAW else '' %} diff --git a/salt/elasticfleet/defaults.yaml b/salt/elasticfleet/defaults.yaml index 0220428bf..a3132d3f4 100644 --- a/salt/elasticfleet/defaults.yaml +++ b/salt/elasticfleet/defaults.yaml @@ -10,12 +10,19 @@ elasticfleet: grid_enrollment: '' defend_filters: enable_auto_configuration: False + outputs: + logstash: + bulk_max_size: '' + worker: '' + queue_mem_events: '' + timeout: '' + loadbalance: False + compression_level: '' 
subscription_integrations: False auto_upgrade_integrations: False logging: zeek: excluded: - - analyzer - broker - capture_loss - cluster diff --git a/salt/elasticfleet/soc_elasticfleet.yaml b/salt/elasticfleet/soc_elasticfleet.yaml index 450e044e6..d7c324855 100644 --- a/salt/elasticfleet/soc_elasticfleet.yaml +++ b/salt/elasticfleet/soc_elasticfleet.yaml @@ -50,6 +50,46 @@ elasticfleet: global: True forcedType: bool helpLink: elastic-fleet.html + outputs: + logstash: + bulk_max_size: + description: The maximum number of events to bulk in a single Logstash request. + global: True + forcedType: int + advanced: True + helpLink: elastic-fleet.html + worker: + description: The number of workers per configured host publishing events. + global: True + forcedType: int + advanced: True + helpLink: elastic-fleet.html + queue_mem_events: + title: queued events + description: The number of events the queue can store. This value should be evenly divisible by 'bulk_max_size' to avoid sending partial batches to the output. + global: True + forcedType: int + advanced: True + helpLink: elastic-fleet.html + timeout: + description: The number of seconds to wait for responses from the Logstash server before timing out, e.g. 30s + regex: ^[0-9]+s$ + advanced: True + global: True + helpLink: elastic-fleet.html + loadbalance: + description: If true and multiple Logstash hosts are configured, the output plugin load balances published events onto all Logstash hosts. If false, the output plugin sends all events to one host (determined at random) and switches to another host if the selected one becomes unresponsive. + forcedType: bool + advanced: True + global: True + helpLink: elastic-fleet.html + compression_level: + description: The gzip compression level. The compression level must be in the range of 1 (best speed) to 9 (best compression). 
+ regex: ^[1-9]$ + forcedType: int + advanced: True + global: True + helpLink: elastic-fleet.html server: custom_fqdn: description: Custom FQDN for Agents to connect to. One per line. diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update index 9efe8a19d..de9b5f93f 100644 --- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update +++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-outputs-update @@ -3,11 +3,13 @@ # Copyright Security Onion Solutions LLC and/or licensed to Security Onion Solutions LLC under one # or more contributor license agreements. Licensed under the Elastic License 2.0; you may not use # this file except in compliance with the Elastic License 2.0. -{% from 'vars/globals.map.jinja' import GLOBALS %} -{% from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} +{%- from 'vars/globals.map.jinja' import GLOBALS %} +{%- from 'elasticfleet/map.jinja' import ELASTICFLEETMERGED %} +{%- from 'elasticfleet/config.map.jinja' import LOGSTASH_CONFIG_YAML %} . /usr/sbin/so-common +FORCE_UPDATE=false # Only run on Managers if ! is_manager_node; then printf "Not a Manager Node... 
Exiting" @@ -22,7 +24,7 @@ function update_logstash_outputs() { --arg UPDATEDLIST "$NEW_LIST_JSON" \ --argjson SECRETS "$SECRETS" \ --argjson SSL_CONFIG "$SSL_CONFIG" \ - '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"","ssl": $SSL_CONFIG,"secrets": $SECRETS}') + '{"name":"grid-logstash","type":"logstash","hosts": $UPDATEDLIST,"is_default":true,"is_default_monitoring":true,"config_yaml":"{{ LOGSTASH_CONFIG_YAML }}","ssl": $SSL_CONFIG,"secrets": $SECRETS}') else JSON_STRING=$(jq -n \ --arg UPDATEDLIST "$NEW_LIST_JSON" \ @@ -97,9 +99,18 @@ function update_kafka_outputs() { exit 1 fi + CURRENT_LOGSTASH_ADV_CONFIG=$(jq -r '.item.config_yaml // ""' <<< "$RAW_JSON") + CURRENT_LOGSTASH_ADV_CONFIG_HASH=$(sha256sum <<< "$CURRENT_LOGSTASH_ADV_CONFIG" | awk '{print $1}') + NEW_LOGSTASH_ADV_CONFIG=$'{{ LOGSTASH_CONFIG_YAML }}' + NEW_LOGSTASH_ADV_CONFIG_HASH=$(sha256sum <<< "$NEW_LOGSTASH_ADV_CONFIG" | awk '{print $1}') + + if [ "$CURRENT_LOGSTASH_ADV_CONFIG_HASH" != "$NEW_LOGSTASH_ADV_CONFIG_HASH" ]; then + FORCE_UPDATE=true + fi + # Get the current list of Logstash outputs & hash them CURRENT_LIST=$(jq -c -r '.item.hosts' <<< "$RAW_JSON") - CURRENT_HASH=$(sha1sum <<< "$CURRENT_LIST" | awk '{print $1}') + CURRENT_HASH=$(sha256sum <<< "$CURRENT_LIST" | awk '{print $1}') declare -a NEW_LIST=() @@ -148,10 +159,10 @@ function update_kafka_outputs() { # Sort & hash the new list of Logstash Outputs NEW_LIST_JSON=$(jq --compact-output --null-input '$ARGS.positional' --args -- "${NEW_LIST[@]}") -NEW_HASH=$(sha1sum <<< "$NEW_LIST_JSON" | awk '{print $1}') +NEW_HASH=$(sha256sum <<< "$NEW_LIST_JSON" | awk '{print $1}') # Compare the current & new list of outputs - if different, update the Logstash outputs -if [ "$NEW_HASH" = "$CURRENT_HASH" ]; then +if [[ "$NEW_HASH" = "$CURRENT_HASH" ]] && [[ "$FORCE_UPDATE" != "true" ]]; then printf "\nHashes match - no update needed.\n" printf "Current List: 
$CURRENT_LIST\nNew List: $NEW_LIST_JSON\n" diff --git a/salt/elasticsearch/files/ingest/suricata.alert b/salt/elasticsearch/files/ingest/suricata.alert index 3d0241e48..ca5bef437 100644 --- a/salt/elasticsearch/files/ingest/suricata.alert +++ b/salt/elasticsearch/files/ingest/suricata.alert @@ -1,15 +1,79 @@ { - "description" : "suricata.alert", - "processors" : [ - { "set": { "if": "ctx.event?.imported != true", "field": "_index", "value": "logs-suricata.alerts-so" } }, - { "set": { "field": "tags","value": "alert" }}, - { "rename":{ "field": "message2.alert", "target_field": "rule", "ignore_failure": true } }, - { "rename":{ "field": "rule.signature", "target_field": "rule.name", "ignore_failure": true } }, - { "rename":{ "field": "rule.ref", "target_field": "rule.version", "ignore_failure": true } }, - { "rename":{ "field": "rule.signature_id", "target_field": "rule.uuid", "ignore_failure": true } }, - { "rename":{ "field": "rule.signature_id", "target_field": "rule.signature", "ignore_failure": true } }, - { "rename":{ "field": "message2.payload_printable", "target_field": "network.data.decoded", "ignore_failure": true } }, - { "dissect": { "field": "rule.rule", "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", "ignore_missing": true, "ignore_failure": true } }, - { "pipeline": { "name": "common.nids" } } - ] + "description": "suricata.alert", + "processors": [ + { + "set": { + "if": "ctx.event?.imported != true", + "field": "_index", + "value": "logs-suricata.alerts-so" + } + }, + { + "set": { + "field": "tags", + "value": "alert" + } + }, + { + "rename": { + "field": "message2.alert", + "target_field": "rule", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "rename": { + "field": "rule.signature", + "target_field": "rule.name", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "rename": { + "field": "rule.ref", + "target_field": "rule.version", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + 
"rename": { + "field": "rule.signature_id", + "target_field": "rule.uuid", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "rename": { + "field": "rule.signature_id", + "target_field": "rule.signature", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.payload_printable", + "target_field": "network.data.decoded", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "dissect": { + "field": "rule.rule", + "pattern": "%{?prefix}content:\"%{dns.query_name}\"%{?remainder}", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "pipeline": { + "name": "common.nids" + } + } + ] } \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/suricata.dns b/salt/elasticsearch/files/ingest/suricata.dns index 3ef68f28b..94ae5f73b 100644 --- a/salt/elasticsearch/files/ingest/suricata.dns +++ b/salt/elasticsearch/files/ingest/suricata.dns @@ -1,21 +1,136 @@ { - "description" : "suricata.dns", - "processors" : [ - { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } }, - { "rename": { "field": "message2.app_proto", "target_field": "network.protocol", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.type", "target_field": "dns.query.type", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.tx_id", "target_field": "dns.id", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.version", "target_field": "dns.version", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.rrname", "target_field": "dns.query.name", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.rrtype", "target_field": "dns.query.type_name", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.flags", "target_field": "dns.flags", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.qr", "target_field": "dns.qr", "ignore_missing": true } }, - { "rename": { 
"field": "message2.dns.rd", "target_field": "dns.recursion.desired", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.ra", "target_field": "dns.recursion.available", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.rcode", "target_field": "dns.response.code_name", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.grouped.A", "target_field": "dns.answers.data", "ignore_missing": true } }, - { "rename": { "field": "message2.dns.grouped.CNAME", "target_field": "dns.answers.name", "ignore_missing": true } }, - { "pipeline": { "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } }, - { "pipeline": { "name": "common" } } - ] -} + "description": "suricata.dns", + "processors": [ + { + "rename": { + "field": "message2.proto", + "target_field": "network.transport", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.app_proto", + "target_field": "network.protocol", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.type", + "target_field": "dns.query.type", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.tx_id", + "target_field": "dns.tx_id", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.id", + "target_field": "dns.id", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.version", + "target_field": "dns.version", + "ignore_missing": true + } + }, + { + "pipeline": { + "name": "suricata.dnsv3", + "ignore_missing_pipeline": true, + "if": "ctx?.dns?.version != null && ctx?.dns?.version == 3", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.dns.rrname", + "target_field": "dns.query.name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.rrtype", + "target_field": "dns.query.type_name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.flags", + "target_field": "dns.flags", + 
"ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.qr", + "target_field": "dns.qr", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.rd", + "target_field": "dns.recursion.desired", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.ra", + "target_field": "dns.recursion.available", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.opcode", + "target_field": "dns.opcode", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.rcode", + "target_field": "dns.response.code_name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.grouped.A", + "target_field": "dns.answers.data", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.dns.grouped.CNAME", + "target_field": "dns.answers.name", + "ignore_missing": true + } + }, + { + "pipeline": { + "if": "ctx.dns.query?.name != null && ctx.dns.query.name.contains('.')", + "name": "dns.tld" + } + }, + { + "pipeline": { + "name": "common" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/suricata.dnsv3 b/salt/elasticsearch/files/ingest/suricata.dnsv3 new file mode 100644 index 000000000..0e804364b --- /dev/null +++ b/salt/elasticsearch/files/ingest/suricata.dnsv3 @@ -0,0 +1,56 @@ +{ + "processors": [ + { + "rename": { + "field": "message2.dns.queries", + "target_field": "dns.queries", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = new HashMap();\n }\n ctx.dns.query.name = ctx?.dns?.queries[0].rrname;\n}" + } + }, + { + "script": { + "source": "if (ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0) {\n if (ctx.dns == null) {\n ctx.dns = new HashMap();\n }\n if (ctx.dns.query == null) {\n ctx.dns.query = 
new HashMap();\n }\n ctx.dns.query.type_name = ctx?.dns?.queries[0].rrtype;\n}" + } + }, + { + "foreach": { + "field": "dns.queries", + "processor": { + "rename": { + "field": "_ingest._value.rrname", + "target_field": "_ingest._value.name", + "ignore_missing": true + } + }, + "ignore_failure": true + } + }, + { + "foreach": { + "field": "dns.queries", + "processor": { + "rename": { + "field": "_ingest._value.rrtype", + "target_field": "_ingest._value.type_name", + "ignore_missing": true + } + }, + "ignore_failure": true + } + }, + { + "pipeline": { + "name": "suricata.tld", + "ignore_missing_pipeline": true, + "if": "ctx?.dns?.queries != null && ctx?.dns?.queries.length > 0", + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/suricata.tld b/salt/elasticsearch/files/ingest/suricata.tld new file mode 100644 index 000000000..aa0d67e1e --- /dev/null +++ b/salt/elasticsearch/files/ingest/suricata.tld @@ -0,0 +1,52 @@ +{ + "processors": [ + { + "script": { + "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.top_level_domain = q.name.substring(q.name.lastIndexOf('.') + 1);\n }\n }\n}", + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.name != null && q.name.contains('.')) {\n q.query_without_tld = q.name.substring(0, q.name.lastIndexOf('.'));\n }\n }\n}", + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.parent_domain = q.query_without_tld.substring(q.query_without_tld.lastIndexOf('.') + 1);\n }\n }\n}", + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : 
ctx.dns.queries) {\n if (q.query_without_tld != null && q.query_without_tld.contains('.')) {\n q.subdomain = q.query_without_tld.substring(0, q.query_without_tld.lastIndexOf('.'));\n }\n }\n}", + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null && q.top_level_domain != null) {\n q.highest_registered_domain = q.parent_domain + \".\" + q.top_level_domain;\n }\n }\n}", + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.subdomain != null) {\n q.subdomain_length = q.subdomain.length();\n }\n }\n}", + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n if (q.parent_domain != null) {\n q.parent_domain_length = q.parent_domain.length();\n }\n }\n}", + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx.dns != null && ctx.dns.queries != null) {\n for (def q : ctx.dns.queries) {\n q.remove('query_without_tld');\n }\n}", + "ignore_failure": true + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/zeek.analyzer b/salt/elasticsearch/files/ingest/zeek.analyzer new file mode 100644 index 000000000..aa743b0ee --- /dev/null +++ b/salt/elasticsearch/files/ingest/zeek.analyzer @@ -0,0 +1,61 @@ +{ + "description": "zeek.analyzer", + "processors": [ + { + "set": { + "field": "event.dataset", + "value": "analyzer" + } + }, + { + "remove": { + "field": [ + "host" + ], + "ignore_failure": true + } + }, + { + "json": { + "field": "message", + "target_field": "message2", + "ignore_failure": true + } + }, + { + "set": { + "field": "network.protocol", + "copy_from": "message2.analyzer_name", + "ignore_empty_value": true, + "if": "ctx?.message2?.analyzer_kind == 'protocol'" + } + }, + { + "set": { + "field": 
"network.protocol", + "ignore_empty_value": true, + "if": "ctx?.message2?.analyzer_kind != 'protocol'", + "copy_from": "message2.proto" + } + }, + { + "lowercase": { + "field": "network.protocol", + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.failure_reason", + "target_field": "error.reason", + "ignore_missing": true + } + }, + { + "pipeline": { + "name": "zeek.common" + } + } + ] +} \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/zeek.dns b/salt/elasticsearch/files/ingest/zeek.dns index 7be8afec6..43853ffe8 100644 --- a/salt/elasticsearch/files/ingest/zeek.dns +++ b/salt/elasticsearch/files/ingest/zeek.dns @@ -1,35 +1,227 @@ { - "description" : "zeek.dns", - "processors" : [ - { "set": { "field": "event.dataset", "value": "dns" } }, - { "remove": { "field": ["host"], "ignore_failure": true } }, - { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, - { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } }, - { "rename": { "field": "message2.proto", "target_field": "network.transport", "ignore_missing": true } }, - { "rename": { "field": "message2.trans_id", "target_field": "dns.id", "ignore_missing": true } }, - { "rename": { "field": "message2.rtt", "target_field": "event.duration", "ignore_missing": true } }, - { "rename": { "field": "message2.query", "target_field": "dns.query.name", "ignore_missing": true } }, - { "rename": { "field": "message2.qclass", "target_field": "dns.query.class", "ignore_missing": true } }, - { "rename": { "field": "message2.qclass_name", "target_field": "dns.query.class_name", "ignore_missing": true } }, - { "rename": { "field": "message2.qtype", "target_field": "dns.query.type", "ignore_missing": true } }, - { "rename": { "field": "message2.qtype_name", "target_field": "dns.query.type_name", "ignore_missing": true } }, - { "rename": { "field": "message2.rcode", "target_field": 
"dns.response.code", "ignore_missing": true } }, - { "rename": { "field": "message2.rcode_name", "target_field": "dns.response.code_name", "ignore_missing": true } }, - { "rename": { "field": "message2.AA", "target_field": "dns.authoritative", "ignore_missing": true } }, - { "rename": { "field": "message2.TC", "target_field": "dns.truncated", "ignore_missing": true } }, - { "rename": { "field": "message2.RD", "target_field": "dns.recursion.desired", "ignore_missing": true } }, - { "rename": { "field": "message2.RA", "target_field": "dns.recursion.available", "ignore_missing": true } }, - { "rename": { "field": "message2.Z", "target_field": "dns.reserved", "ignore_missing": true } }, - { "rename": { "field": "message2.answers", "target_field": "dns.answers.name", "ignore_missing": true } }, - { "foreach": {"field": "dns.answers.name","processor": {"pipeline": {"name": "common.ip_validation"}},"if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null","ignore_failure": true}}, - { "foreach": {"field": "temp._valid_ips","processor": {"append": {"field": "dns.resolved_ip","allow_duplicates": false,"value": "{{{_ingest._value}}}","ignore_failure": true}},"ignore_failure": true}}, - { "script": { "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }","ignore_failure": true }}, - { "remove": {"field": ["temp"], "ignore_missing": true ,"ignore_failure": true } }, - { "rename": { "field": "message2.TTLs", "target_field": "dns.ttls", "ignore_missing": true } }, - { "rename": { "field": "message2.rejected", "target_field": "dns.query.rejected", "ignore_missing": true } }, - { "script": { "lang": "painless", "source": "ctx.dns.query.length = ctx.dns.query.name.length()", "ignore_failure": true } }, - { "set": { "if": "ctx._index == 'so-zeek'", "field": "_index", "value": "so-zeek_dns", "override": true } }, - { "pipeline": { "if": 
"ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", "name": "dns.tld" } }, - { "pipeline": { "name": "zeek.common" } } - ] + "description": "zeek.dns", + "processors": [ + { + "set": { + "field": "event.dataset", + "value": "dns" + } + }, + { + "remove": { + "field": [ + "host" + ], + "ignore_failure": true + } + }, + { + "json": { + "field": "message", + "target_field": "message2", + "ignore_failure": true + } + }, + { + "dot_expander": { + "field": "id.orig_h", + "path": "message2", + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.proto", + "target_field": "network.transport", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.trans_id", + "target_field": "dns.id", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.rtt", + "target_field": "event.duration", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.query", + "target_field": "dns.query.name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.qclass", + "target_field": "dns.query.class", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.qclass_name", + "target_field": "dns.query.class_name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.qtype", + "target_field": "dns.query.type", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.qtype_name", + "target_field": "dns.query.type_name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.rcode", + "target_field": "dns.response.code", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.rcode_name", + "target_field": "dns.response.code_name", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.AA", + "target_field": "dns.authoritative", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.TC", + "target_field": "dns.truncated", + "ignore_missing": true + } + }, + { + "rename": { + 
"field": "message2.RD", + "target_field": "dns.recursion.desired", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.RA", + "target_field": "dns.recursion.available", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.Z", + "target_field": "dns.reserved", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.answers", + "target_field": "dns.answers.name", + "ignore_missing": true + } + }, + { + "foreach": { + "field": "dns.answers.name", + "processor": { + "pipeline": { + "name": "common.ip_validation" + } + }, + "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", + "ignore_failure": true + } + }, + { + "foreach": { + "field": "temp._valid_ips", + "processor": { + "append": { + "field": "dns.resolved_ip", + "allow_duplicates": false, + "value": "{{{_ingest._value}}}", + "ignore_failure": true + } + }, + "if": "ctx.dns != null && ctx.dns.answers != null && ctx.dns.answers.name != null", + "ignore_failure": true + } + }, + { + "script": { + "source": "if (ctx.dns.resolved_ip != null && ctx.dns.resolved_ip instanceof List) {\n ctx.dns.resolved_ip.removeIf(item -> item == null || item.toString().trim().isEmpty());\n }", + "ignore_failure": true + } + }, + { + "remove": { + "field": [ + "temp" + ], + "ignore_missing": true, + "ignore_failure": true + } + }, + { + "rename": { + "field": "message2.TTLs", + "target_field": "dns.ttls", + "ignore_missing": true + } + }, + { + "rename": { + "field": "message2.rejected", + "target_field": "dns.query.rejected", + "ignore_missing": true + } + }, + { + "script": { + "lang": "painless", + "source": "ctx.dns.query.length = ctx.dns.query.name.length()", + "ignore_failure": true + } + }, + { + "set": { + "if": "ctx._index == 'so-zeek'", + "field": "_index", + "value": "so-zeek_dns", + "override": true + } + }, + { + "pipeline": { + "if": "ctx.dns?.query?.name != null && ctx.dns.query.name.contains('.')", + "name": "dns.tld" + } + }, + { + 
"pipeline": { + "name": "zeek.common" + } + } + ] } \ No newline at end of file diff --git a/salt/elasticsearch/files/ingest/zeek.dpd b/salt/elasticsearch/files/ingest/zeek.dpd deleted file mode 100644 index 2f76c5ecb..000000000 --- a/salt/elasticsearch/files/ingest/zeek.dpd +++ /dev/null @@ -1,20 +0,0 @@ -{ - "description" : "zeek.dpd", - "processors" : [ - { "set": { "field": "event.dataset", "value": "dpd" } }, - { "remove": { "field": ["host"], "ignore_failure": true } }, - { "json": { "field": "message", "target_field": "message2", "ignore_failure": true } }, - { "dot_expander": { "field": "id.orig_h", "path": "message2", "ignore_failure": true } }, - { "rename": { "field": "message2.id.orig_h", "target_field": "source.ip", "ignore_missing": true } }, - { "dot_expander": { "field": "id.orig_p", "path": "message2", "ignore_failure": true } }, - { "rename": { "field": "message2.id.orig_p", "target_field": "source.port", "ignore_missing": true } }, - { "dot_expander": { "field": "id.resp_h", "path": "message2", "ignore_failure": true } }, - { "rename": { "field": "message2.id.resp_h", "target_field": "destination.ip", "ignore_missing": true } }, - { "dot_expander": { "field": "id.resp_p", "path": "message2", "ignore_failure": true } }, - { "rename": { "field": "message2.id.resp_p", "target_field": "destination.port", "ignore_missing": true } }, - { "rename": { "field": "message2.proto", "target_field": "network.protocol", "ignore_missing": true } }, - { "rename": { "field": "message2.analyzer", "target_field": "observer.analyzer", "ignore_missing": true } }, - { "rename": { "field": "message2.failure_reason", "target_field": "error.reason", "ignore_missing": true } }, - { "pipeline": { "name": "zeek.common" } } - ] -} diff --git a/salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate b/salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate index 96219c50c..1eb1e33ca 100755 --- 
a/salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate +++ b/salt/elasticsearch/tools/sbin/so-elasticsearch-retention-estimate @@ -206,7 +206,7 @@ fail() { exit 1 } -echo -e "\nDISCLAIMER: Script output is based on current data patterns, but are approximations soley intended to assist with getting a general ILM policy configured." +echo -e "\nDISCLAIMER: Script output is based on current data patterns, but are approximations solely intended to assist with getting a general ILM policy configured." ORG_ID=$(lookup_org_id) [ -n "$ORG_ID" ] || fail "Unable to resolve InfluxDB org id" @@ -756,7 +756,7 @@ if [ "$should_trigger_recommendations" = true ]; then ilm_output=$(so-elasticsearch-query "${index}/_ilm/explain" --fail 2>/dev/null) || true if [ -n "$ilm_output" ]; then - policy=$(echo "$ilm_output" | jq --arg idx "$index" -r ".indices[$idx].policy // empty" 2>/dev/null) + policy=$(echo "$ilm_output" | jq -r '.indices | to_entries | .[0].value.policy // empty' 2>/dev/null) fi if [ -n "$policy" ] && [ -n "${policy_ages[$policy]:-}" ]; then delete_min_age=${policy_ages[$policy]} @@ -1024,8 +1024,12 @@ else if [ "$ilm_indices_immediate" -gt 0 ]; then echo -e "${BOLD}Deleting now:${NC} $ilm_indices_immediate indices (~${ilm_delete_immediate_gb} GB, $ilm_shards_immediate shards)" fi - if [ "$ilm_indices_7d" -gt 0 ]; then - echo -e "${BOLD}Storage to be freed (7d):${NC} $ilm_indices_7d indices (~${ilm_delete_7d_gb} GB, $ilm_shards_7d shards)" + if [ "$ilm_indices_30d" -gt 0 ]; then + if [ "$ilm_delete_scheduled_30d" -gt 0 ] && [ "$ilm_indices_scheduled_30d" -gt 0 ]; then + echo -e "${BOLD}Storage to be freed (30d):${NC} $ilm_indices_30d indices (~${ilm_delete_30d_gb} GB, $ilm_shards_30d shards)" + elif [ "$ilm_indices_7d" -gt 0 ]; then + echo -e "${BOLD}Storage to be freed (7d):${NC} $ilm_indices_7d indices (~${ilm_delete_7d_gb} GB, $ilm_shards_7d shards)" + fi fi log_title "LOG" "Retention Projection" diff --git a/salt/manager/tools/sbin/soup 
b/salt/manager/tools/sbin/soup index 276bbc94c..32553b5c3 100755 --- a/salt/manager/tools/sbin/soup +++ b/salt/manager/tools/sbin/soup @@ -274,7 +274,7 @@ check_os_updates() { if [[ "$confirm" == [cC] ]]; then echo "Continuing without updating packages" elif [[ "$confirm" == [uU] ]]; then - echo "Applying Grid Updates" + echo "Applying Grid Updates. The following patch.os salt state may take a while depending on how many packages need to be updated." update_flag=true else echo "Exiting soup" @@ -1318,6 +1318,8 @@ upgrade_salt() { fi # Else do Ubuntu things elif [[ $is_deb ]]; then + # ensure these files don't exist when upgrading from 3006.9 to 3006.16 + rm -f /etc/apt/keyrings/salt-archive-keyring-2023.pgp /etc/apt/sources.list.d/salt.list echo "Removing apt hold for Salt." echo "" apt-mark unhold "salt-common" diff --git a/salt/registry/enabled.sls b/salt/registry/enabled.sls index 88eea6dc3..ed5b180cd 100644 --- a/salt/registry/enabled.sls +++ b/salt/registry/enabled.sls @@ -5,6 +5,7 @@ {% from 'allowed_states.map.jinja' import allowed_states %} {% if sls.split('.')[0] in allowed_states %} +{% from 'vars/globals.map.jinja' import GLOBALS %} {% from 'docker/docker.map.jinja' import DOCKER %} include: @@ -57,6 +58,17 @@ so-dockerregistry: - x509: registry_crt - x509: registry_key +wait_for_so-dockerregistry: + http.wait_for_successful_query: + - name: 'https://{{ GLOBALS.registry_host }}:5000/v2/' + - ssl: True + - verify_ssl: False + - status: 200 + - wait_for: 120 + - request_interval: 5 + - require: + - docker_container: so-dockerregistry + delete_so-dockerregistry_so-status.disabled: file.uncomment: - name: /opt/so/conf/so-status/so-status.conf diff --git a/salt/salt/scripts/bootstrap-salt.sh b/salt/salt/scripts/bootstrap-salt.sh index 8f2956606..861f22de5 100644 --- a/salt/salt/scripts/bootstrap-salt.sh +++ b/salt/salt/scripts/bootstrap-salt.sh @@ -26,7 +26,7 @@ 
#====================================================================================================================== set -o nounset # Treat unset variables as an error -__ScriptVersion="2025.02.24" +__ScriptVersion="2025.09.03" __ScriptName="bootstrap-salt.sh" __ScriptFullName="$0" @@ -48,6 +48,7 @@ __ScriptArgs="$*" # * BS_GENTOO_USE_BINHOST: If 1 add `--getbinpkg` to gentoo's emerge # * BS_SALT_MASTER_ADDRESS: The IP or DNS name of the salt-master the minion should connect to # * BS_SALT_GIT_CHECKOUT_DIR: The directory where to clone Salt on git installations +# * BS_TMP_DIR: The directory to use for executing the installation (defaults to /tmp) #====================================================================================================================== @@ -171,12 +172,12 @@ __check_config_dir() { case "$CC_DIR_NAME" in http://*|https://*) - __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + __fetch_url "${_TMP_DIR}/${CC_DIR_BASE}" "${CC_DIR_NAME}" + CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}" ;; ftp://*) - __fetch_url "/tmp/${CC_DIR_BASE}" "${CC_DIR_NAME}" - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + __fetch_url "${_TMP_DIR}/${CC_DIR_BASE}" "${CC_DIR_NAME}" + CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}" ;; *://*) echoerror "Unsupported URI scheme for $CC_DIR_NAME" @@ -194,22 +195,22 @@ __check_config_dir() { case "$CC_DIR_NAME" in *.tgz|*.tar.gz) - tar -zxf "${CC_DIR_NAME}" -C /tmp + tar -zxf "${CC_DIR_NAME}" -C ${_TMP_DIR} CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tgz") CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.gz") - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}" ;; *.tbz|*.tar.bz2) - tar -xjf "${CC_DIR_NAME}" -C /tmp + tar -xjf "${CC_DIR_NAME}" -C ${_TMP_DIR} CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tbz") CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.bz2") - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}" ;; *.txz|*.tar.xz) - tar -xJf "${CC_DIR_NAME}" -C /tmp 
+ tar -xJf "${CC_DIR_NAME}" -C ${_TMP_DIR} CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".txz") CC_DIR_BASE=$(basename "${CC_DIR_BASE}" ".tar.xz") - CC_DIR_NAME="/tmp/${CC_DIR_BASE}" + CC_DIR_NAME="${_TMP_DIR}/${CC_DIR_BASE}" ;; esac @@ -245,6 +246,7 @@ __check_unparsed_options() { #---------------------------------------------------------------------------------------------------------------------- _KEEP_TEMP_FILES=${BS_KEEP_TEMP_FILES:-$BS_FALSE} _TEMP_CONFIG_DIR="null" +_TMP_DIR=${BS_TMP_DIR:-"/tmp"} _SALTSTACK_REPO_URL="https://github.com/saltstack/salt.git" _SALT_REPO_URL=${_SALTSTACK_REPO_URL} _TEMP_KEYS_DIR="null" @@ -281,7 +283,7 @@ _SIMPLIFY_VERSION=$BS_TRUE _LIBCLOUD_MIN_VERSION="0.14.0" _EXTRA_PACKAGES="" _HTTP_PROXY="" -_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-/tmp/git/salt} +_SALT_GIT_CHECKOUT_DIR=${BS_SALT_GIT_CHECKOUT_DIR:-${_TMP_DIR}/git/salt} _NO_DEPS=$BS_FALSE _FORCE_SHALLOW_CLONE=$BS_FALSE _DISABLE_SSL=$BS_FALSE @@ -367,7 +369,7 @@ __usage() { also be specified. Salt installation will be ommitted, but some of the dependencies could be installed to write configuration with -j or -J. -d Disables checking if Salt services are enabled to start on system boot. - You can also do this by touching /tmp/disable_salt_checks on the target + You can also do this by touching ${BS_TMP_DIR}/disable_salt_checks on the target host. Default: \${BS_FALSE} -D Show debug output -f Force shallow cloning for git installations. @@ -424,6 +426,9 @@ __usage() { -r Disable all repository configuration performed by this script. This option assumes all necessary repository configuration is already present on the system. + -T If set this overrides the use of /tmp for script execution. 
This is + to allow for systems in which noexec is applied to temp filesystem mounts + for security reasons -U If set, fully upgrade the system prior to bootstrapping Salt -v Display script version -V Install Salt into virtualenv @@ -436,7 +441,7 @@ __usage() { EOT } # ---------- end of function __usage ---------- -while getopts ':hvnDc:g:Gx:k:s:MSWNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:aqQ' opt +while getopts ':hvnDc:g:Gx:k:s:MSWNXCPFUKIA:i:Lp:dH:bflV:J:j:rR:T:aqQ' opt do case "${opt}" in @@ -478,6 +483,7 @@ do a ) _PIP_ALL=$BS_TRUE ;; r ) _DISABLE_REPOS=$BS_TRUE ;; R ) _CUSTOM_REPO_URL=$OPTARG ;; + T ) _TMP_DIR="$OPTARG" ;; J ) _CUSTOM_MASTER_CONFIG=$OPTARG ;; j ) _CUSTOM_MINION_CONFIG=$OPTARG ;; q ) _QUIET_GIT_INSTALLATION=$BS_TRUE ;; @@ -495,10 +501,10 @@ done shift $((OPTIND-1)) # Define our logging file and pipe paths -LOGFILE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.log/g )" -LOGPIPE="/tmp/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )" +LOGFILE="${_TMP_DIR}/$( echo "$__ScriptName" | sed s/.sh/.log/g )" +LOGPIPE="${_TMP_DIR}/$( echo "$__ScriptName" | sed s/.sh/.logpipe/g )" # Ensure no residual pipe exists -rm "$LOGPIPE" 2>/dev/null +rm -f "$LOGPIPE" 2>/dev/null # Create our logging pipe # On FreeBSD we have to use mkfifo instead of mknod @@ -534,7 +540,7 @@ exec 2>"$LOGPIPE" # 14 SIGALRM # 15 SIGTERM #---------------------------------------------------------------------------------------------------------------------- -APT_ERR=$(mktemp /tmp/apt_error.XXXXXX) +APT_ERR=$(mktemp ${_TMP_DIR}/apt_error.XXXXXX) __exit_cleanup() { EXIT_CODE=$? @@ -927,6 +933,11 @@ if [ -d "${_VIRTUALENV_DIR}" ]; then exit 1 fi +# Make sure the designated temp directory exists +if [ ! 
-d "${_TMP_DIR}" ]; then + mkdir -p "${_TMP_DIR}" +fi + #--- FUNCTION ------------------------------------------------------------------------------------------------------- # NAME: __fetch_url # DESCRIPTION: Retrieves a URL and writes it to a given path @@ -1941,11 +1952,6 @@ __wait_for_apt(){ # Timeout set at 15 minutes WAIT_TIMEOUT=900 - ## see if sync'ing the clocks helps - if [ -f /usr/sbin/hwclock ]; then - /usr/sbin/hwclock -s - fi - # Run our passed in apt command "${@}" 2>"$APT_ERR" APT_RETURN=$? @@ -1996,14 +2002,14 @@ __apt_get_upgrade_noinput() { #---------------------------------------------------------------------------------------------------------------------- __temp_gpg_pub() { if __check_command_exists mktemp; then - tempfile="$(mktemp /tmp/salt-gpg-XXXXXXXX.pub 2>/dev/null)" + tempfile="$(mktemp ${_TMP_DIR}/salt-gpg-XXXXXXXX.pub 2>/dev/null)" if [ -z "$tempfile" ]; then - echoerror "Failed to create temporary file in /tmp" + echoerror "Failed to create temporary file in ${_TMP_DIR}" return 1 fi else - tempfile="/tmp/salt-gpg-$$.pub" + tempfile="${_TMP_DIR}/salt-gpg-$$.pub" fi echo $tempfile @@ -2043,7 +2049,7 @@ __rpm_import_gpg() { __fetch_url "$tempfile" "$url" || return 1 # At least on CentOS 8, a missing newline at the end causes: - # error: /tmp/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key. + # error: ${_TMP_DIR}/salt-gpg-n1gKUb1u.pub: key 1 not an armored public key. 
# shellcheck disable=SC1003,SC2086 sed -i -e '$a\' $tempfile @@ -2109,7 +2115,7 @@ __git_clone_and_checkout() { fi __SALT_GIT_CHECKOUT_PARENT_DIR=$(dirname "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null) - __SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-/tmp/git}" + __SALT_GIT_CHECKOUT_PARENT_DIR="${__SALT_GIT_CHECKOUT_PARENT_DIR:-${_TMP_DIR}/git}" __SALT_CHECKOUT_REPONAME="$(basename "${_SALT_GIT_CHECKOUT_DIR}" 2>/dev/null)" __SALT_CHECKOUT_REPONAME="${__SALT_CHECKOUT_REPONAME:-salt}" [ -d "${__SALT_GIT_CHECKOUT_PARENT_DIR}" ] || mkdir "${__SALT_GIT_CHECKOUT_PARENT_DIR}" @@ -2162,7 +2168,7 @@ __git_clone_and_checkout() { if [ "$__SHALLOW_CLONE" -eq $BS_TRUE ]; then # Let's try 'treeless' cloning to speed up. Treeless cloning omits trees and blobs ('files') - # but includes metadata (commit history, tags, branches etc. + # but includes metadata (commit history, tags, branches etc. # Test for "--filter" option introduced in git 2.19, the minimal version of git where the treeless # cloning we need actually works if [ "$(git clone 2>&1 | grep 'filter')" != "" ]; then @@ -2390,14 +2396,14 @@ __overwriteconfig() { # Make a tempfile to dump any python errors into. 
if __check_command_exists mktemp; then - tempfile="$(mktemp /tmp/salt-config-XXXXXXXX 2>/dev/null)" + tempfile="$(mktemp ${_TMP_DIR}/salt-config-XXXXXXXX 2>/dev/null)" if [ -z "$tempfile" ]; then - echoerror "Failed to create temporary file in /tmp" + echoerror "Failed to create temporary file in ${_TMP_DIR}" return 1 fi else - tempfile="/tmp/salt-config-$$" + tempfile="${_TMP_DIR}/salt-config-$$" fi if [ -n "$_PY_EXE" ]; then @@ -2760,8 +2766,8 @@ __install_salt_from_repo() { echoinfo "Installing salt using ${_py_exe}, $(${_py_exe} --version)" cd "${_SALT_GIT_CHECKOUT_DIR}" || return 1 - mkdir -p /tmp/git/deps - echodebug "Created directory /tmp/git/deps" + mkdir -p ${_TMP_DIR}/git/deps + echodebug "Created directory ${_TMP_DIR}/git/deps" if [ ${DISTRO_NAME_L} = "ubuntu" ] && [ "$DISTRO_MAJOR_VERSION" -eq 22 ]; then echodebug "Ubuntu 22.04 has problem with base.txt requirements file, not parsing sys_platform == 'win32', upgrading from default pip works" @@ -2774,7 +2780,7 @@ __install_salt_from_repo() { fi fi - rm -f /tmp/git/deps/* + rm -f ${_TMP_DIR}/git/deps/* echodebug "Installing Salt requirements from PyPi, ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --ignore-installed ${_PIP_INSTALL_ARGS} -r requirements/static/ci/py${_py_version}/linux.txt" ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --ignore-installed ${_PIP_INSTALL_ARGS} -r "requirements/static/ci/py${_py_version}/linux.txt" @@ -2799,7 +2805,7 @@ __install_salt_from_repo() { echodebug "Running '${_py_exe} setup.py --salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} bdist_wheel'" ${_py_exe} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" bdist_wheel || return 1 - mv dist/salt*.whl /tmp/git/deps/ || return 1 + mv dist/salt*.whl ${_TMP_DIR}/git/deps/ || return 1 cd "${__SALT_GIT_CHECKOUT_PARENT_DIR}" || return 1 @@ -2813,14 +2819,14 @@ __install_salt_from_repo() { ${_pip_cmd} install 
--force-reinstall --break-system-packages "${_arch_dep}" fi - echodebug "Running '${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} /tmp/git/deps/salt*.whl'" + echodebug "Running '${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl'" - echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} /tmp/git/deps/salt*.whl" + echodebug "Running ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall ${_PIP_INSTALL_ARGS} --global-option=--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS} ${_TMP_DIR}/git/deps/salt*.whl" ${_pip_cmd} install ${_USE_BREAK_SYSTEM_PACKAGES} --no-deps --force-reinstall \ ${_PIP_INSTALL_ARGS} \ --global-option="--salt-config-dir=$_SALT_ETC_DIR --salt-cache-dir=${_SALT_CACHE_DIR} ${SETUP_PY_INSTALL_ARGS}" \ - /tmp/git/deps/salt*.whl || return 1 + ${_TMP_DIR}/git/deps/salt*.whl || return 1 echoinfo "Checking if Salt can be imported using ${_py_exe}" CHECK_SALT_SCRIPT=$(cat << EOM @@ -6295,8 +6301,8 @@ __get_packagesite_onedir_latest() { } -__install_saltstack_photon_onedir_repository() { - echodebug "__install_saltstack_photon_onedir_repository() entry" +__install_saltstack_vmware_photon_os_onedir_repository() { + echodebug "__install_saltstack_vmware_photon_os_onedir_repository() entry" if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 ]; then echoerror "Python version is no longer supported, only Python 3" @@ -6376,8 +6382,8 @@ __install_saltstack_photon_onedir_repository() { return 0 } -install_photon_deps() { - echodebug "install_photon_deps() entry" +install_vmware_photon_os_deps() { + echodebug "install_vmware_photon_os_deps() entry" if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 
]; then echoerror "Python version is no longer supported, only Python 3" @@ -6406,8 +6412,8 @@ install_photon_deps() { return 0 } -install_photon_stable_post() { - echodebug "install_photon_stable_post() entry" +install_vmware_photon_os_stable_post() { + echodebug "install_vmware_photon_os_stable_post() entry" for fname in api master minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -6424,8 +6430,8 @@ install_photon_stable_post() { done } -install_photon_git_deps() { - echodebug "install_photon_git_deps() entry" +install_vmware_photon_os_git_deps() { + echodebug "install_vmware_photon_os_git_deps() entry" if [ -n "$_PY_EXE" ] && [ "$_PY_MAJOR_VERSION" -ne 3 ]; then echoerror "Python version is no longer supported, only Python 3" @@ -6463,7 +6469,7 @@ install_photon_git_deps() { __PACKAGES="python${PY_PKG_VER}-devel python${PY_PKG_VER}-pip python${PY_PKG_VER}-setuptools gcc glibc-devel linux-devel.x86_64 cython${PY_PKG_VER}" - echodebug "install_photon_git_deps() distro major version, ${DISTRO_MAJOR_VERSION}" + echodebug "install_vmware_photon_os_git_deps() distro major version, ${DISTRO_MAJOR_VERSION}" ## Photon 5 container is missing systemd on default installation if [ "${DISTRO_MAJOR_VERSION}" -lt 5 ]; then @@ -6489,8 +6495,8 @@ install_photon_git_deps() { return 0 } -install_photon_git() { - echodebug "install_photon_git() entry" +install_vmware_photon_os_git() { + echodebug "install_vmware_photon_os_git() entry" if [ "${_PY_EXE}" != "" ]; then _PYEXE=${_PY_EXE} @@ -6500,7 +6506,7 @@ install_photon_git() { return 1 fi - install_photon_git_deps + install_vmware_photon_os_git_deps if [ -f "${_SALT_GIT_CHECKOUT_DIR}/salt/syspaths.py" ]; then ${_PYEXE} setup.py --salt-config-dir="$_SALT_ETC_DIR" --salt-cache-dir="${_SALT_CACHE_DIR}" ${SETUP_PY_INSTALL_ARGS} install --prefix=/usr || return 1 @@ -6510,8 +6516,8 @@ install_photon_git() { return 0 } -install_photon_git_post() { - echodebug 
"install_photon_git_post() entry" +install_vmware_photon_os_git_post() { + echodebug "install_vmware_photon_os_git_post() entry" for fname in api master minion syndic; do # Skip if not meant to be installed @@ -6543,9 +6549,9 @@ install_photon_git_post() { done } -install_photon_restart_daemons() { +install_vmware_photon_os_restart_daemons() { [ "$_START_DAEMONS" -eq $BS_FALSE ] && return - echodebug "install_photon_restart_daemons() entry" + echodebug "install_vmware_photon_os_restart_daemons() entry" for fname in api master minion syndic; do @@ -6567,8 +6573,8 @@ install_photon_restart_daemons() { done } -install_photon_check_services() { - echodebug "install_photon_check_services() entry" +install_vmware_photon_os_check_services() { + echodebug "install_vmware_photon_os_check_services() entry" for fname in api master minion syndic; do # Skip salt-api since the service should be opt-in and not necessarily started on boot @@ -6585,8 +6591,8 @@ install_photon_check_services() { return 0 } -install_photon_onedir_deps() { - echodebug "install_photon_onedir_deps() entry" +install_vmware_photon_os_onedir_deps() { + echodebug "install_vmware_photon_os_onedir_deps() entry" if [ "$_UPGRADE_SYS" -eq $BS_TRUE ]; then @@ -6600,17 +6606,17 @@ install_photon_onedir_deps() { fi if [ "$_DISABLE_REPOS" -eq "$BS_FALSE" ]; then - __install_saltstack_photon_onedir_repository || return 1 + __install_saltstack_vmware_photon_os_onedir_repository || return 1 fi # If -R was passed, we need to configure custom repo url with rsync-ed packages # Which was handled in __install_saltstack_rhel_repository buu that hanlded old-stable which is for # releases which are End-Of-Life. This call has its own check in case -r was passed without -R. 
if [ "$_CUSTOM_REPO_URL" != "null" ]; then - __install_saltstack_photon_onedir_repository || return 1 + __install_saltstack_vmware_photon_os_onedir_repository || return 1 fi - __PACKAGES="procps-ng sudo shadow" + __PACKAGES="procps-ng sudo shadow wget" # shellcheck disable=SC2086 __tdnf_install_noinput ${__PACKAGES} || return 1 @@ -6626,9 +6632,9 @@ install_photon_onedir_deps() { } -install_photon_onedir() { +install_vmware_photon_os_onedir() { - echodebug "install_photon_onedir() entry" + echodebug "install_vmware_photon_os_onedir() entry" STABLE_REV=$ONEDIR_REV _GENERIC_PKG_VERSION="" @@ -6672,9 +6678,9 @@ install_photon_onedir() { return 0 } -install_photon_onedir_post() { +install_vmware_photon_os_onedir_post() { STABLE_REV=$ONEDIR_REV - install_photon_stable_post || return 1 + install_vmware_photon_os_stable_post || return 1 return 0 } @@ -7797,7 +7803,7 @@ install_macosx_git_deps() { export PATH=/usr/local/bin:$PATH fi - __fetch_url "/tmp/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1 + __fetch_url "${_TMP_DIR}/get-pip.py" "https://bootstrap.pypa.io/get-pip.py" || return 1 if [ -n "$_PY_EXE" ]; then _PYEXE="${_PY_EXE}" @@ -7807,7 +7813,7 @@ install_macosx_git_deps() { fi # Install PIP - $_PYEXE /tmp/get-pip.py || return 1 + $_PYEXE ${_TMP_DIR}/get-pip.py || return 1 # shellcheck disable=SC2119 __git_clone_and_checkout || return 1 @@ -7819,9 +7825,9 @@ install_macosx_stable() { install_macosx_stable_deps || return 1 - __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1 + __fetch_url "${_TMP_DIR}/${PKG}" "${SALTPKGCONFURL}" || return 1 - /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || return 1 + /usr/sbin/installer -pkg "${_TMP_DIR}/${PKG}" -target / || return 1 return 0 } @@ -7830,9 +7836,9 @@ install_macosx_onedir() { install_macosx_onedir_deps || return 1 - __fetch_url "/tmp/${PKG}" "${SALTPKGCONFURL}" || return 1 + __fetch_url "${_TMP_DIR}/${PKG}" "${SALTPKGCONFURL}" || return 1 - /usr/sbin/installer -pkg "/tmp/${PKG}" -target / || 
return 1 + /usr/sbin/installer -pkg "${_TMP_DIR}/${PKG}" -target / || return 1 return 0 } diff --git a/salt/soc/defaults.yaml b/salt/soc/defaults.yaml index f95456924..b3bbfa659 100644 --- a/salt/soc/defaults.yaml +++ b/salt/soc/defaults.yaml @@ -1364,6 +1364,8 @@ soc: cases: soc filedatastore: jobDir: jobs + retryFailureIntervalMs: 600000 + retryFailureMaxAttempts: 5 kratos: hostUrl: hydra: @@ -1744,7 +1746,7 @@ soc: showSubtitle: true - name: DPD description: Dynamic Protocol Detection errors - query: 'tags:dpd | groupby error.reason' + query: '(tags:dpd OR tags:analyzer) | groupby error.reason' showSubtitle: true - name: Files description: Files grouped by mimetype @@ -2010,7 +2012,7 @@ soc: query: 'tags:dns | groupby dns.query.name | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby dns.highest_registered_domain | groupby dns.parent_domain | groupby dns.query.type_name | groupby dns.response.code_name | groupby dns.answers.name | groupby destination.as.organization.name' - name: DPD description: DPD (Dynamic Protocol Detection) errors - query: 'tags:dpd | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination.as.organization.name' + query: '(tags:dpd OR tags:analyzer) | groupby error.reason | groupby -sankey error.reason source.ip | groupby source.ip | groupby -sankey source.ip destination.ip | groupby destination.ip | groupby destination.port | groupby network.protocol | groupby destination.as.organization.name' - name: Files description: Files seen in network traffic query: 'tags:file | groupby file.mime_type | groupby -sankey file.mime_type file.source | groupby file.source | groupby file.bytes.total | groupby source.ip | groupby destination.ip | groupby destination.as.organization.name' @@ -2552,6 +2554,7 @@ soc: 
assistant: enabled: false investigationPrompt: Investigate Alert ID {socId} + compressContextPrompt: Summarize the conversation for context compaction thresholdColorRatioLow: 0.5 thresholdColorRatioMed: 0.75 thresholdColorRatioMax: 1 @@ -2561,18 +2564,22 @@ soc: contextLimitSmall: 200000 contextLimitLarge: 1000000 lowBalanceColorAlert: 500000 + enabled: true - id: sonnet-4.5 displayName: Claude Sonnet 4.5 contextLimitSmall: 200000 contextLimitLarge: 1000000 lowBalanceColorAlert: 500000 + enabled: true - id: gptoss-120b displayName: GPT-OSS 120B contextLimitSmall: 128000 contextLimitLarge: 128000 lowBalanceColorAlert: 500000 + enabled: true - id: qwen-235b displayName: QWEN 235B contextLimitSmall: 256000 contextLimitLarge: 256000 lowBalanceColorAlert: 500000 + enabled: true diff --git a/salt/soc/soc_soc.yaml b/salt/soc/soc_soc.yaml index 8a31c977d..ed3615bb8 100644 --- a/salt/soc/soc_soc.yaml +++ b/salt/soc/soc_soc.yaml @@ -424,6 +424,17 @@ soc: description: The maximum number of documents to request in a single Elasticsearch scroll request. bulkIndexWorkerCount: description: The number of worker threads to use when bulk indexing data into Elasticsearch. A value below 1 will default to the number of CPUs available. + filedatastore: + jobDir: + description: The location where local job files are stored on the manager. + global: True + advanced: True + retryFailureIntervalMs: + description: The interval, in milliseconds, to wait before attempting to reprocess a failed job. + global: True + retryFailureMaxAttempts: + description: The max number of attempts to process a job, in the event the job fails to complete. + global: True sostatus: refreshIntervalMs: description: Duration (in milliseconds) between refreshes of the grid status. Shortening this duration may not have expected results, as the backend systems feeding this sostatus data will continue their updates as scheduled. 
@@ -606,6 +617,9 @@ soc: investigationPrompt: description: Prompt given to Onion AI when beginning an investigation. global: True + compressContextPrompt: + description: Prompt given to Onion AI when summarizing a conversation in order to compress context. + global: True thresholdColorRatioLow: description: Lower visual context color change threshold. global: True @@ -648,6 +662,9 @@ soc: label: Low Balance Color Alert forcedType: int required: True + - field: enabled + label: Enabled + forcedType: bool apiTimeoutMs: description: Duration (in milliseconds) to wait for a response from the SOC server API before giving up and showing an error on the SOC UI. global: True diff --git a/salt/suricata/suricata_mdengine.yaml b/salt/suricata/suricata_mdengine.yaml index 1c3855501..8a0c502fc 100644 --- a/salt/suricata/suricata_mdengine.yaml +++ b/salt/suricata/suricata_mdengine.yaml @@ -29,7 +29,7 @@ suricata: #custom: [Accept-Encoding, Accept-Language, Authorization] # dump-all-headers: none - dns: - version: 2 + version: 3 enabled: "yes" #requests: "no" #responses: "no" diff --git a/salt/zeek/defaults.yaml b/salt/zeek/defaults.yaml index 81bfa3d9d..169b6521a 100644 --- a/salt/zeek/defaults.yaml +++ b/salt/zeek/defaults.yaml @@ -45,7 +45,7 @@ zeek: - protocols/ssh/geo-data - protocols/ssh/detect-bruteforcing - protocols/ssh/interesting-hostnames - - protocols/http/detect-sqli + - protocols/http/detect-sql-injection - frameworks/files/hash-all-files - frameworks/files/detect-MHR - policy/frameworks/notice/extend-email/hostnames diff --git a/setup/so-functions b/setup/so-functions index 88da7ee9e..a8414d0e8 100755 --- a/setup/so-functions +++ b/setup/so-functions @@ -502,6 +502,7 @@ configure_minion() { minion_type=desktop fi info "Configuring minion type as $minion_type" + logCmd "mkdir -p /etc/salt/minion.d" echo "role: so-$minion_type" > /etc/salt/grains local minion_config=/etc/salt/minion @@ -541,20 +542,6 @@ configure_minion() { "log_file: /opt/so/log/salt/minion"\ 
"#startup_states: highstate" >> "$minion_config" - # At the time the so-managerhype node does not yet have the bridge configured. - # The so-hypervisor node doesn't either, but it doesn't cause issues here. - local usebr0=false - if [ "$minion_type" == 'hypervisor' ]; then - usebr0=true - fi - local pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}" - info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='$pillar_json'" - salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="$pillar_json" - - { - logCmd "systemctl enable salt-minion"; - logCmd "systemctl restart salt-minion"; - } >> "$setup_log" 2>&1 } checkin_at_boot() { @@ -729,7 +716,7 @@ configure_network_sensor() { fi # Create the bond interface only if it doesn't already exist - nmcli -f name,uuid -p con | grep -q '$INTERFACE' + nmcli -f name,uuid -p con | grep -q "$INTERFACE" local found_int=$? if [[ $found_int != 0 ]]; then @@ -798,25 +785,18 @@ configure_hyper_bridge() { } copy_salt_master_config() { - + logCmd "mkdir /etc/salt" title "Copy the Salt master config template to the proper directory" if [ "$setup_type" = 'iso' ]; then logCmd "cp /root/SecurityOnion/files/salt/master/master /etc/salt/master" - #logCmd "cp /root/SecurityOnion/files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service" else logCmd "cp ../files/salt/master/master /etc/salt/master" - #logCmd "cp ../files/salt/master/salt-master.service /usr/lib/systemd/system/salt-master.service" fi info "Copying pillar and salt files in $temp_install_dir to $local_salt_dir" logCmd "cp -R $temp_install_dir/pillar/ $local_salt_dir/" if [ -d "$temp_install_dir"/salt ] ; then logCmd "cp -R $temp_install_dir/salt/ $local_salt_dir/" fi - - # Restart the service so it picks up the changes - logCmd "systemctl daemon-reload" - logCmd "systemctl enable salt-master" - logCmd "systemctl restart salt-master" } create_local_nids_rules() { 
@@ -1941,11 +1921,12 @@ repo_sync_local() { } saltify() { - info "Installing Salt" SALTVERSION=$(grep "version:" ../salt/salt/master.defaults.yaml | grep -o "[0-9]\+\.[0-9]\+") + info "Installing Salt $SALTVERSION" + chmod u+x ../salt/salt/scripts/bootstrap-salt.sh if [[ $is_deb ]]; then - DEBIAN_FRONTEND=noninteractive retry 150 20 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || fail_setup + DEBIAN_FRONTEND=noninteractive retry 30 10 "apt-get -y -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\" upgrade" >> "$setup_log" 2>&1 || fail_setup if [ $OSVER == "focal" ]; then update-alternatives --install /usr/bin/python python /usr/bin/python3.10 10; fi local pkg_arr=( 'apache2-utils' @@ -1958,16 +1939,11 @@ saltify() { 'jq' 'gnupg' ) - retry 150 20 "apt-get -y install ${pkg_arr[*]}" || fail_setup + retry 30 10 "apt-get -y install ${pkg_arr[*]}" || fail_setup logCmd "mkdir -vp /etc/apt/keyrings" logCmd "wget -q --inet4-only -O /etc/apt/keyrings/docker.pub https://download.docker.com/linux/ubuntu/gpg" - # Download public key - logCmd "curl -fsSL -o /etc/apt/keyrings/salt-archive-keyring-2023.pgp https://packages.broadcom.com/artifactory/api/security/keypair/SaltProjectKey/public" - # Create apt repo target configuration - echo "deb [signed-by=/etc/apt/keyrings/salt-archive-keyring-2023.pgp arch=amd64] https://packages.broadcom.com/artifactory/saltproject-deb/ stable main" | sudo tee /etc/apt/sources.list.d/salt.list - if [[ $is_ubuntu ]]; then # Add Docker Repo add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" @@ -1978,45 +1954,50 @@ saltify() { echo "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $OSVER stable" > /etc/apt/sources.list.d/docker.list fi - logCmd "apt-key add /etc/apt/keyrings/salt-archive-keyring-2023.pgp" - - #logCmd 
"apt-key add /opt/so/gpg/SALTSTACK-GPG-KEY.pub" logCmd "apt-key add /etc/apt/keyrings/docker.pub" - # Add SO Saltstack Repo - #echo "deb https://repo.securityonion.net/file/securityonion-repo/ubuntu/20.04/amd64/salt3004.2/ focal main" > /etc/apt/sources.list.d/saltstack.list - - # Ain't nothing but a GPG - - retry 150 20 "apt-get update" "" "Err:" || fail_setup + retry 30 10 "apt-get update" "" "Err:" || fail_setup if [[ $waitforstate ]]; then - retry 150 20 "apt-get -y install salt-common=$SALTVERSION salt-minion=$SALTVERSION salt-master=$SALTVERSION" || fail_setup - retry 150 20 "apt-mark hold salt-minion salt-common salt-master" || fail_setup - retry 150 20 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-packaging python3-influxdb python3-lxml" || exit 1 + retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -M -X stable $SALTVERSION" || fail_setup + retry 30 10 "apt-mark hold salt-minion salt-common salt-master" || fail_setup + retry 30 10 "apt-get -y install python3-pip python3-dateutil python3-m2crypto python3-packaging python3-influxdb python3-lxml" || exit 1 else - retry 150 20 "apt-get -y install salt-common=$SALTVERSION salt-minion=$SALTVERSION" || fail_setup - retry 150 20 "apt-mark hold salt-minion salt-common" || fail_setup + retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -X stable $SALTVERSION" || fail_setup + retry 30 10 "apt-mark hold salt-minion salt-common" || fail_setup fi fi if [[ $is_rpm ]]; then if [[ $waitforstate ]]; then # install all for a manager - logCmd "dnf -y install salt-$SALTVERSION salt-master-$SALTVERSION salt-minion-$SALTVERSION" + retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -r -M -X stable $SALTVERSION" || fail_setup else - # We just need the minion - if [[ $is_airgap ]]; then - logCmd "dnf -y install salt salt-minion" - else - logCmd "dnf -y install salt-$SALTVERSION salt-minion-$SALTVERSION" - fi + # just a minion + retry 30 10 "bash ../salt/salt/scripts/bootstrap-salt.sh -r 
-X stable $SALTVERSION" || fail_setup fi fi - logCmd "mkdir -p /etc/salt/minion.d" salt_install_module_deps salt_patch_x509_v2 + # At the time the so-managerhype node does not yet have the bridge configured. + # The so-hypervisor node doesn't either, but it doesn't cause issues here. + local usebr0=false + if [ "$minion_type" == 'hypervisor' ]; then + usebr0=true + fi + local pillar_json="{\"host\": {\"mainint\": \"$MNIC\"}, \"usebr0\": $usebr0}" + info "Running: salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar='$pillar_json'" + salt-call state.apply salt.mine_functions --local --file-root=../salt/ -l info pillar="$pillar_json" + + if [[ $waitforstate ]]; then + logCmd "systemctl enable salt-master"; + logCmd "systemctl start salt-master"; + fi + + logCmd "systemctl enable salt-minion"; + logCmd "systemctl restart salt-minion"; + } salt_install_module_deps() { diff --git a/setup/so-setup b/setup/so-setup index bdb1c38e2..91f1fa9aa 100755 --- a/setup/so-setup +++ b/setup/so-setup @@ -745,13 +745,12 @@ if ! [[ -f $install_opt_file ]]; then securityonion_repo # Update existing packages update_packages - # Install salt - saltify - # Start the master service + # Put salt-master config in place copy_salt_master_config configure_minion "$minion_type" + # Install salt + saltify check_sos_appliance - logCmd "salt-key -yd $MINION_ID" sleep 2 # Debug RSA Key format errors logCmd "salt-call state.show_top" @@ -852,8 +851,8 @@ if ! [[ -f $install_opt_file ]]; then gpg_rpm_import securityonion_repo update_packages - saltify configure_minion "$minion_type" + saltify check_sos_appliance drop_install_options hypervisor_local_states