diff --git a/salt/elasticfleet/files/integrations/endpoints-initial/elastic-defend-endpoints.json b/salt/elasticfleet/files/integrations/endpoints-initial/elastic-defend-endpoints.json
new file mode 100644
index 000000000..7d7f5bb35
--- /dev/null
+++ b/salt/elasticfleet/files/integrations/endpoints-initial/elastic-defend-endpoints.json
@@ -0,0 +1,28 @@
+{
+  "name": "elastic-defend-endpoints",
+  "namespace": "default",
+  "description": "",
+  "package": {
+    "name": "endpoint",
+    "title": "Elastic Defend",
+    "version": ""
+  },
+  "enabled": true,
+  "policy_id": "endpoints-initial",
+  "vars": {},
+  "inputs": [{
+    "type": "endpoint",
+    "enabled": true,
+    "streams": [],
+    "config": {
+      "integration_config": {
+        "value": {
+          "type": "endpoint",
+          "endpointConfig": {
+            "preset": "DataCollection"
+          }
+        }
+      }
+    }
+  }]
+}
\ No newline at end of file
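Note: this diff does not show how the integration file is consumed. Assuming it is pushed through Kibana's standard Fleet package-policies API with the same curl config used elsewhere in this changeset (and with the empty package version filled in first), registering it would look roughly like the sketch below; the endpoint usage and file path are illustrative, not the actual setup script.

    # Illustrative only - assumes the package version has already been filled in
    curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/package_policies" \
      -H 'kbn-xsrf: true' -H 'Content-Type: application/json' \
      -d @elastic-defend-endpoints.json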
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers
index 2dd92d21b..84a519d37 100755
--- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers
+++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-agent-gen-installers
@@ -10,12 +10,12 @@
 
 . /usr/sbin/so-common
 
-FLEETHOST="https://{{ GLOBALS.manager_ip }}:8220"
+#FLEETHOST="https://{{ GLOBALS.manager_ip }}:8220"
 
 for i in {1..30}
 do
-  ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints")) | .api_key')
-  #FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts' | jq -r '.items[].host_urls[]' | paste -sd ',')
+  ENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
+  FLEETHOST=$(curl -K /opt/so/conf/elasticsearch/curl.config 'http://localhost:5601/api/fleet/fleet_server_hosts/grid-default' | jq -r '.item.host_urls[]' | paste -sd ',')
   if [[ $FLEETHOST ]] && [[ $ENROLLMENTOKEN ]]; then break; else sleep 10; fi
 done
 if [[ -z $FLEETHOST ]] || [[ -z $ENROLLMENTOKEN ]]; then printf "\nFleet Host URL or Enrollment Token empty - exiting..." && exit; fi
diff --git a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup
index 6ad97a223..c81d69282 100755
--- a/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup
+++ b/salt/elasticfleet/tools/sbin_jinja/so-elastic-fleet-setup
@@ -35,9 +35,16 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
 printf "\n\n"
 {%- endif %}
 
+# Add Manager IP & URL Base to Fleet Host URLs
 printf "\nAdd SO-Manager Fleet URL\n"
+if [ "{{ GLOBALS.manager_ip }}" = "{{ GLOBALS.url_base }}" ]; then
+  JSON_STRING=$( jq -n '{"id":"grid-default","name":"grid-default","is_default":true,"host_urls":["https://{{ GLOBALS.url_base }}:8220"]}')
+else
+  JSON_STRING=$( jq -n '{"id":"grid-default","name":"grid-default","is_default":true,"host_urls":["https://{{ GLOBALS.url_base }}:8220", "https://{{ GLOBALS.manager_ip }}:8220"]}')
+fi
+
 ## This array replaces whatever URLs are currently configured
-curl -K /opt/so/conf/elasticsearch/curl.config -L -X PUT "localhost:5601/api/fleet/settings" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d '{"fleet_server_hosts":["https://{{ GLOBALS.manager_ip }}:8220", "https://{{ GLOBALS.manager }}:8220"]}'
+curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fleet/fleet_server_hosts" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' -d "$JSON_STRING"
 
 printf "\n\n"
 
@@ -74,7 +81,7 @@ curl -K /opt/so/conf/elasticsearch/curl.config -L -X POST "localhost:5601/api/fl
 ### Finalization ###
 
 # Query for Enrollment Tokens for default policies
-ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-default")) | .api_key')
+ENDPOINTSENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("endpoints-initial")) | .api_key')
 GRIDNODESENROLLMENTOKEN=$(curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/enrollment_api_keys" -H 'kbn-xsrf: true' -H 'Content-Type: application/json' | jq .list | jq -r -c '.[] | select(.policy_id | contains("so-grid-nodes")) | .api_key')
 
 # Store needed data in minion pillar
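Because the Fleet Server host entry is now created with the fixed id grid-default, the so-elastic-agent-gen-installers change above can fetch that single entry instead of listing every configured host and joining the results. A quick manual check of what this setup script registered, using the same endpoint the installer script polls (illustrative):

    curl -K /opt/so/conf/elasticsearch/curl.config -L "localhost:5601/api/fleet/fleet_server_hosts/grid-default" \
      -H 'kbn-xsrf: true' | jq -r '.item.host_urls[]'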
diff --git a/salt/influxdb/templates/alarm_high_redis_memory_usage.json b/salt/influxdb/templates/alarm_high_redis_memory_usage.json
new file mode 100644
index 000000000..fe99ad430
--- /dev/null
+++ b/salt/influxdb/templates/alarm_high_redis_memory_usage.json
@@ -0,0 +1,28 @@
+[{
+  "apiVersion": "influxdata.com/v2alpha1",
+  "kind": "CheckThreshold",
+  "metadata": {
+    "name": "high-redis-memory"
+  },
+  "spec": {
+    "description": "Triggers when the average percent of used memory for Redis reaches a defined threshold. To tune this alert, modify the value for the appropriate alert level.",
+    "every": "1m",
+    "name": "High Redis Memory Usage",
+    "query": "from(bucket: \"telegraf/so_short_term\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"redisqueue\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"mem_used\")\n |\u003e aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |\u003e yield(name: \"mean\")",
+    "status": "active",
+    "statusMessageTemplate": "Redis memory usage on the ${r.host} node has reached the ${r._level} threshold. The current percent of used memory is ${r.mem_used}.",
+    "thresholds": [
+      {
+        "level": "WARN",
+        "type": "greater",
+        "value": 80
+      },
+      {
+        "level": "CRIT",
+        "type": "greater",
+        "value": 90
+      }
+    ]
+  }
+}]
+
diff --git a/salt/influxdb/templates/alarm_low_monitor_traffic.json b/salt/influxdb/templates/alarm_low_monitor_traffic.json
new file mode 100644
index 000000000..167ae1b5a
--- /dev/null
+++ b/salt/influxdb/templates/alarm_low_monitor_traffic.json
@@ -0,0 +1,22 @@
+[{
+  "apiVersion": "influxdata.com/v2alpha1",
+  "kind": "CheckThreshold",
+  "metadata": {
+    "name": "monitor-interface-traffic"
+  },
+  "spec": {
+    "description": "Triggers when the volume of network traffic (in Mbps) received on the monitor interface, per sensor, falls below a defined threshold. To tune this alert, modify the value in Mbps for the appropriate alert level.",
+    "every": "1m",
+    "name": "Low Traffic Volume on Monitor Interface",
+    "query": "from(bucket: \"telegraf/so_short_term\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"net\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"bytes_recv\")\n |\u003e filter(fn: (r) =\u003e r[\"interface\"] == \"bond0\")\n |\u003e derivative(unit: 1s, nonNegative: true)\n |\u003e map(fn: (r) =\u003e ({r with \"_value\": r._value * 8.0 / 1000000.0}))\n |\u003e yield(name: \"nonnegative derivative\")",
+    "status": "active",
+    "statusMessageTemplate": "Interface ${r.interface} on node ${r.host} has reached the ${r._level} threshold. The current volume of traffic on interface ${r.interface} is ${r.bytes_recv} Mbps.",
+    "thresholds": [
+      {
+        "level": "CRIT",
+        "type": "lesser",
+        "value": 5
+      }
+    ]
+  }
+}]
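How these check templates are loaded is not shown in this diff; presumably the influxdb state applies them with the stock InfluxDB 2.x template mechanism, along the lines of the sketch below (org name and on-disk path are illustrative).

    # Illustrative only - apply one of the new check templates to InfluxDB 2.x
    influx apply --org so --file /opt/so/conf/influxdb/templates/alarm_high_redis_memory_usage.json --force yes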
diff --git a/salt/influxdb/templates/alarm_pcap_retention.json b/salt/influxdb/templates/alarm_pcap_retention.json
new file mode 100644
index 000000000..969d462c9
--- /dev/null
+++ b/salt/influxdb/templates/alarm_pcap_retention.json
@@ -0,0 +1,27 @@
+[{
+  "apiVersion": "influxdata.com/v2alpha1",
+  "kind": "CheckThreshold",
+  "metadata": {
+    "name": "alarm-pcap-retention"
+  },
+  "spec": {
+    "description": "Triggers when the PCAP retention (in days) falls below the defined threshold. To tune this alert, modify the value for the appropriate alert level.",
+    "every": "1m0s",
+    "name": "Low PCAP Retention",
+    "query": "from(bucket: \"telegraf/so_short_term\")\n |> range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |> filter(fn: (r) => r[\"_measurement\"] == \"pcapage\")\n |> filter(fn: (r) => r[\"_field\"] == \"seconds\")\n |> map(fn: (r) => ({ r with _value: r._value / (24.0 * 3600.0)})) |\u003e map(fn: (r) =\u003e ({r with _value: int(v: r._value)}))\n |> aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |> yield(name: \"mean\")",
+    "status": "active",
+    "statusMessageTemplate": "PCAP retention on node ${r.host} has reached the ${r._level} threshold. Node ${r.host} currently has approximately ${r.seconds} days of PCAP data.",
+    "thresholds": [
+      {
+        "level": "CRIT",
+        "type": "lesser",
+        "value": 1
+      },
+      {
+        "level": "WARN",
+        "type": "lesser",
+        "value": 3
+      }
+    ]
+  }
+}]
diff --git a/salt/influxdb/templates/alarm_steno_packet_loss.json b/salt/influxdb/templates/alarm_steno_packet_loss.json
new file mode 100644
index 000000000..c5cfb4297
--- /dev/null
+++ b/salt/influxdb/templates/alarm_steno_packet_loss.json
@@ -0,0 +1,27 @@
+[{
+  "apiVersion": "influxdata.com/v2alpha1",
+  "kind": "CheckThreshold",
+  "metadata": {
+    "name": "steno-packet-loss"
+  },
+  "spec": {
+    "description": "Triggers when the average percent of packet loss is above the defined threshold. To tune this alert, modify the value for the appropriate alert level.",
+    "every": "1m",
+    "name": "Stenographer Packet Loss",
+    "query": "from(bucket: \"telegraf/so_short_term\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"stenodrop\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"drop\")\n |\u003e aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |\u003e yield(name: \"mean\")",
+    "status": "active",
+    "statusMessageTemplate": "Stenographer Packet Loss on node ${r.host} has reached the ${ r._level } threshold. The current packet loss is ${ r.drop }%.",
+    "thresholds": [
+      {
+        "level": "CRIT",
+        "type": "greater",
+        "value": 5
+      },
+      {
+        "level": "WARN",
+        "type": "greater",
+        "value": 3
+      }
+    ]
+  }
+}]
diff --git a/salt/influxdb/templates/alarm_suricata_packet_loss.json b/salt/influxdb/templates/alarm_suricata_packet_loss.json
new file mode 100644
index 000000000..99fda6167
--- /dev/null
+++ b/salt/influxdb/templates/alarm_suricata_packet_loss.json
@@ -0,0 +1,27 @@
+[{
+  "apiVersion": "influxdata.com/v2alpha1",
+  "kind": "CheckThreshold",
+  "metadata": {
+    "name": "suricata-packet-loss"
+  },
+  "spec": {
+    "description": "Triggers when the average percent of packet loss is above the defined threshold. To tune this alert, modify the value for the appropriate alert level.",
+    "every": "1m",
+    "name": "Suricata Packet Loss",
+    "query": "from(bucket: \"telegraf/so_short_term\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"suridrop\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"drop\")\n |\u003e map(fn: (r) =\u003e ({r with \"_value\": r._value * 100.0}))\n |\u003e map(fn: (r) =\u003e ({ r with _value: int(v: r._value) }))\n |\u003e aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |\u003e yield(name: \"mean\")",
+    "status": "active",
+    "statusMessageTemplate": "Suricata packet loss on node ${r.host} has reached the ${ r._level } threshold. The current packet loss is ${ r.drop }%.",
+    "thresholds": [
+      {
+        "level": "CRIT",
+        "type": "greater",
+        "value": 5
+      },
+      {
+        "level": "WARN",
+        "type": "greater",
+        "value": 3
+      }
+    ]
+  }
+}]
diff --git a/salt/influxdb/templates/alarm_zeek_packet_loss.json b/salt/influxdb/templates/alarm_zeek_packet_loss.json
new file mode 100644
index 000000000..633ed5294
--- /dev/null
+++ b/salt/influxdb/templates/alarm_zeek_packet_loss.json
@@ -0,0 +1,27 @@
+[{
+  "apiVersion": "influxdata.com/v2alpha1",
+  "kind": "CheckThreshold",
+  "metadata": {
+    "name": "zeek-packet-loss"
+  },
+  "spec": {
+    "description": "Triggers when the average percent of packet loss is above the defined threshold. To tune this alert, modify the value for the appropriate alert level.",
+    "every": "1m",
+    "name": "Zeek Packet Loss",
+    "query": "from(bucket: \"telegraf/so_short_term\")\n |\u003e range(start: v.timeRangeStart, stop: v.timeRangeStop)\n |\u003e filter(fn: (r) =\u003e r[\"_measurement\"] == \"zeekdrop\")\n |\u003e filter(fn: (r) =\u003e r[\"_field\"] == \"drop\")\n |\u003e map(fn: (r) =\u003e ({r with \"_value\": r._value * 100.0}))\n |\u003e map(fn: (r) =\u003e ({ r with _value: int(v: r._value) }))\n |\u003e aggregateWindow(every: 1m, fn: mean, createEmpty: false)\n |\u003e yield(name: \"mean\")",
+    "status": "active",
+    "statusMessageTemplate": "Zeek Packet Loss on node ${r.host} has reached the ${ r._level } threshold. The current packet loss is ${ r.drop }%.",
+    "thresholds": [
+      {
+        "level": "CRIT",
+        "type": "greater",
+        "value": 5
+      },
+      {
+        "level": "WARN",
+        "type": "greater",
+        "value": 3
+      }
+    ]
+  }
+}]
diff --git a/salt/soctopus/files/templates/generic.template b/salt/soctopus/files/templates/generic.template
index 035d38b24..74b40bef9 100644
--- a/salt/soctopus/files/templates/generic.template
+++ b/salt/soctopus/files/templates/generic.template
@@ -12,3 +12,13 @@ play_url: "https://{{ GLOBALS.url_base }}/playbook/issues/6000"
 kibana_pivot: "https://{{ GLOBALS.url_base }}/kibana/app/kibana#/discover?_g=()&_a=(columns:!(_source),interval:auto,query:(language:lucene,query:'_id:{[_id]}'),sort:!('@timestamp',desc))"
 soc_pivot: "https://{{ GLOBALS.url_base }}/#/hunt"
 sigma_level: ""
+
+index: '.ds-logs-*'
+name: EQL
+priority: 3
+realert:
+  minutes: 0
+type: any
+filter:
+- query:
+    query_string:
diff --git a/salt/telegraf/scripts/redis.sh b/salt/telegraf/scripts/redis.sh
index c730885d4..dba893c87 100644
--- a/salt/telegraf/scripts/redis.sh
+++ b/salt/telegraf/scripts/redis.sh
@@ -11,8 +11,9 @@ if [[ ! "`pidof -x $(basename $0) -o %PPID`" ]]; then
 
   UNPARSED=$(redis-cli llen logstash:unparsed | awk '{print $1}')
   PARSED=$(redis-cli llen logstash:parsed | awk '{print $1}')
-
-  echo "redisqueue unparsed=$UNPARSED,parsed=$PARSED"
+  MEM_USED=$(redis-cli info memory | grep used_memory_peak_perc | cut -d ":" -f2 | sed "s/%//")
+
+  echo "redisqueue unparsed=$UNPARSED,parsed=$PARSED,mem_used=$MEM_USED"
 
 fi
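With the extra field, the script still emits a single InfluxDB line-protocol record per run; mem_used comes from used_memory_peak_perc with the percent sign stripped, which is the field the high-redis-memory check above filters on. A sample output line (values illustrative):

    redisqueue unparsed=2048,parsed=512,mem_used=81.55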
diff --git a/setup/so-functions b/setup/so-functions
index 09e219cfd..86ff5f5b0 100755
--- a/setup/so-functions
+++ b/setup/so-functions
@@ -972,7 +972,19 @@ download_elastic_agent_artifacts() {
   else
     logCmd "mkdir -p /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
     logCmd "curl --retry 5 --retry-delay 60 https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.tar.gz --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz"
-    logCmd "tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
+    logCmd "curl --retry 5 --retry-delay 60 https://repo.securityonion.net/file/so-repo/prod/2.4/elasticagent/elastic-agent_SO-$SOVERSION.md5 --output /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.md5"
+
+    SOURCEHASH=$(md5sum /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz | awk '{ print $1 }')
+    HASH=$(cat /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.md5)
+
+    if [[ "$HASH" == "$SOURCEHASH" ]]; then
+      info "Elastic Agent source hash is good."
+    else
+      info "Elastic Agent hash verification failed - the downloaded file may be corrupt or incomplete."
+      exit 1
+    fi
+
+    logCmd "tar -xf /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz -C /nsm/elastic-fleet/artifacts/beats/elastic-agent/"
   fi
 }
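The comparison assumes the downloaded .md5 file contains only the bare hash, since its entire contents are checked against the first field of md5sum's output; that is also why md5sum -c is not used here, as that form expects "hash  filename" lines. A stand-alone equivalent of the same check, under that assumption:

    # Illustrative - same bare-hash comparison as the setup function above
    [ "$(md5sum /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.tar.gz | awk '{ print $1 }')" = \
      "$(cat /nsm/elastic-fleet/artifacts/elastic-agent_SO-$SOVERSION.md5)" ] || { echo "hash mismatch"; exit 1; }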